Started 'base (Python 3.9.7)' kernel
Python 3.9.7 (default, Sep 16 2021, 13:09:58)
Type 'copyright', 'credits' or 'license' for more information
IPython 7.29.0 -- An enhanced Interactive Python. Type '?' for help.
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Rate-based recurrent neural network with sparse random connectivity.

    Parameters
    ----------
    N : int
        Number of neurons.
    g : float
        Gain of the recurrent weights; g > 1.0 puts the network in the
        chaotic regime.
    p : float
        Connection probability of the recurrent matrix J.
    tau : float
        Neuron time constant.
    dt : float
        Simulation time step.
    N_input : int
        Number of input units (1 for sound, 1 for "lickport starts moving").
    N_out : int
        Number of output units (1 here: drives the lickport).
    T : float
        Default trial duration, used by add_input.
    b : float
        Scale factor on the feedback learning error.

    Fixes vs. previous revision: calculate_manifold used the module-level
    `network` object instead of `self`; learning read `record_r[-1, ...]`
    (all zeros until the last step) instead of the current step; add_input
    relied on module-level T/dt and a shape test that failed for 1-D pulses.
    """
    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.T = T  # stored so methods don't depend on module globals
        self.b = b
        # Sparse recurrent matrix: keep each off-diagonal entry with prob p.
        mask = np.random.rand(self.N, self.N) < self.p
        np.fill_diagonal(mask, 0)  # no self-connections
        self.mask = mask
        # Scaled by g/sqrt(p*N) so the spectral radius of J is ~g.
        self.J = self.g / np.sqrt(self.p * self.N) * np.random.randn(self.N, self.N) * mask
        # NOTE(review): 2*randn-1 is a shifted Gaussian, not uniform in
        # [-1, 1]; if uniform weights were intended, use 2*rand-1 — confirm.
        self.W_in = 2 * np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2 * np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2 * np.random.randn(self.N, 1) - 1
    def step(self, ext):
        """One Euler step of tau*r' = -r + J z + W_in ext; z = tanh(r)."""
        self.r = self.r + \
            self.dt / self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)
    def add_input(self, I, plot=False):
        """Store external input I into a (time_steps, N_input) buffer.

        A 1-D I is placed on input channel 0 with the remaining channels
        zero; a 2-D I is used as-is. Returns the stored buffer.

        Fix: the buffer is sized from self.T/self.dt (was module-level
        T/dt), and the 1-D case is detected with I.ndim (the old
        I.shape[-1] == 1 test was False for any pulse longer than one
        sample, so the padding branch never ran).
        """
        self.ext = np.zeros((int(self.T / self.dt), self.N_input))
        if I.ndim == 1:
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext
    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds.

        ext is a (time_steps, N_input) input array (zeros when None);
        r0 is the initial state (random when None).
        Returns (final z, recorded r trajectory of shape (time_steps, N)).
        """
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        # record the firing-rate trajectory, one row per time step
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            self.step(self.ext[i])
            record_r[i + 1, :] = self.r
        return self.z, record_r
    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport; everything in m/s.
        cursor_velocity should eventually depend on CN activity, but for
        now it is a constant.
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial
    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None,
                 manifold_eig_vec=None, manifold_eig_vals=None):
        """Train J with an RLS-like rule driven by a conditioned-neuron error.

        On day 0 the error target is the mean population activity; on later
        days it is the projection of the activity onto the previous day's
        leading manifold eigenvector.

        Fix: the error is computed from the *current* step record_r[i, ...];
        the old record_r[-1, ...] row stays zero until the final iteration.
        Returns (r trajectory, tanh of it).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T / self.dt)
        self.P = np.eye(self.N, self.N) * 0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0  # carry over the previous trial's final state when given
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            # error = b*(CN activity - target); only the single largest
            # eigenvector of yesterday's manifold is used as the target
            # direction ("learning vector").
            if day_id == 0:
                error_val = self.b * (record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
            else:
                error_val = self.b * (record_r[i, conditioned_neuron] -
                                      record_r[i, :] @ manifold_eig_vec[:, manifold_eig_vals.argmax()])
            self.error = self.W_fb * error_val
            if i % 2 == 0:
                # recursive least-squares update of P and J every other step
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P) / (1 + np.dot(self.r, Pr))
                self.e_minus = self.error
                self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
                self.J -= self.dw
            self.step(ext[i])
            record_r[i + 1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)
    def participation_ratio(self, eig_vals):
        """(sum λ)^2 / sum(λ^2) — effective dimensionality of a spectrum."""
        return (np.sum(eig_vals.real) ** 2) / (np.sum(eig_vals.real ** 2))
    def calculate_manifold(self, T, trials, I, pulse_end):
        """PCA of post-pulse activity pooled over `trials` simulations.

        Fix: calls self.simulate (the previous revision reached out to the
        module-level `network` object from inside the class).
        Returns (projected activity, activity, eig_vals, eig_vecs, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps - pulse_end
        activity = np.zeros((trials * npoints, self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            # keep only the activity after the input pulse has ended
            activity[i * npoints:(i + 1) * npoints, :] = z_simulation[pulse_end:, :]
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
# --- simulation hyper-parameters (mirror the RNN constructor defaults) ---
N = 500      # number of neurons
g = 1.5      # recurrent gain; > 1 puts the network in the chaotic regime
p = 0.1      # connection probability
tau = 0.1    # neuron time constant
dt = 0.01    # integration time step
N_in = 2     # number of input channels
T = 5        # trial duration
def square_wave(amplitude, start, end, T, dt):
    """Return a length-int(T/dt) pulse: `amplitude` on [start, end), else 0."""
    n_steps = int(T / dt)
    assert end <= n_steps
    pulse = np.zeros(n_steps)
    pulse[start:end] = amplitude
    return pulse
pulse_amplitude = 1   # height of the input pulse
pulse_start = 10      # pulse onset, in time steps
pulse_end = 30        # pulse offset, in time steps
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
# initialize the network
network = RNN(N=N,g=g,p=p,tau=tau,dt=dt,N_input=N_in, T=T)
network.add_input(I)
# simulate the network for T time and find the manifold, eig_vals etc
z_end, r_simulation = network.simulate(T, ext=None)
activity_manifold, activity, eig_val, eig_vec, pr, cov = network.calculate_manifold(T, 10, I, pulse_end=pulse_end)
# choose a conditioned neuron as one of the top 10 firing neurons
# NOTE(review): argsort is ascending, so [:10] selects the ten *lowest*-peaking
# neurons of the first 100 steps, not the top 10 — confirm intent.
cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[:10])
print(cn)
# plot dynamics of network during simulation, ordered and unordered. Also calculate the PR, 90% cutoff var.
putils.plot_dynamics(np.tanh(r_simulation), cn=cn)
sorted_array, cn_new_idx = putils.plot_dynamics_ordered(np.tanh(r_simulation), criteria="max_initial", sort="descending", cn=cn)
print(f"Participation Ratio: {pr}")
# number of eigen-components needed to explain 90% of the variance
print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
# train the network with our learning rule. calculate manifold, eig_vals etc
r_learn, z_learn = network.learning(T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=0)
activity_manifold, activity, eig_val, eig_vec, pr, cov = network.calculate_manifold(T, 10, I, pulse_end=pulse_end)
# plot activity of network at the end of learning trials, normal and ordered. Calculate PR, 90% cutoff var, etc
putils.plot_dynamics(np.tanh(r_learn), cn=cn)
sorted_array, cn_new_idx = putils.plot_dynamics_ordered(np.tanh(r_learn), criteria="max_initial", sort="descending", cn=cn)
print(f"Participation Ratio Final: {pr}")
print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
# day 1: the error is now measured against the previous manifold's leading eigenvector
r_learn, z_learn = network.learning(T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=1, manifold_eig_vec=eig_vec, manifold_eig_vals=eig_val)
activity_manifold, activity, eig_val, eig_vec, pr, cov = network.calculate_manifold(T, 10, I, pulse_end=pulse_end)
# plot activity of network at the end of learning trials, normal and ordered. Calculate PR, 90% cutoff var, etc
putils.plot_dynamics(np.tanh(r_learn), cn=cn)
sorted_array, cn_new_idx = putils.plot_dynamics_ordered(np.tanh(r_learn), criteria="max_initial", sort="descending", cn=cn)
print(f"Participation Ratio Final: {pr}")
print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 490
Participation Ratio: 6.790804844694585 10 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio Final: 7.7798998158026444 14 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio Final: 6.443054771676237 11
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
[]*5
[]
l = []*5
print(l)
[]
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Class to implement a general RNN Model
    Parameters
    ---------------
    N = number of neurons
    g = gain constant of the network. g>1.0 is chaotic regime
    p = connection probability
    tau = neuron time constant
    dt = simulation time step
    N_input = number of input units. 1 for sound, 1 for lickport starts moving
    N_out = number of output units, 1 in our case which drives the lickport
    T = default trial duration; b = scale factor on the learning error
    """
    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.b = b
        # NOTE(review): the T argument is never stored; add_input below reads
        # the module-level T instead — confirm this is intended.
        # Make the J matrix
        # sparse mask: keep each off-diagonal entry with probability p
        mask = np.random.rand(self.N,self.N)<self.p
        np.fill_diagonal(mask,np.zeros(self.N))  # no self-connections
        self.mask = mask
        # scaled by g/sqrt(p*N) so the spectral radius of J is approximately g
        self.J = self.g / np.sqrt(self.p*self.N) * np.random.randn(self.N,self.N) * mask
        # NOTE(review): 2*randn-1 is a shifted Gaussian, not uniform in
        # [-1, 1]; if uniform weights were intended, use 2*rand-1.
        self.W_in = 2*np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2*np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2*np.random.randn(self.N, 1) - 1
    def step(self, ext):
        """One Euler step of tau*r' = -r + J z + W_in ext; z = tanh(r)."""
        # print(f"{np.dot(self.J, self.z).shape}, {np.dot(self.W_in, ext.T).shape}")
        self.r = self.r + \
            self.dt/self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)
    def add_input(self, I, plot=False):
        """Store external input I into a (time_steps, N_input) buffer."""
        # NOTE(review): sizes the buffer from module-level T and dt rather
        # than self attributes; also I.shape[-1] == 1 is False for a 1-D
        # pulse longer than one sample, so the else branch stores I without
        # padding the second input channel — confirm both are intended.
        self.ext = np.zeros((int(T/dt), self.N_input))
        if I.shape[-1] == 1:
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext
    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds; return (final z, r trajectory)."""
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2*np.random.randn(self.N)-1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        #simulation for time_step steps
        record_r = np.zeros((time_steps,self.N))
        record_r[0,:] = self.r
        for i in range(time_steps-1):
            # print(ext[i].shape)
            self.step(self.ext[i])
            record_r[i+1, :] = self.r
        return self.z, record_r
    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport
        everything in m/s
        cursor_velocity must be dependent on CN activity but right now we just let it be constant
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial
    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None, manifold_eig_vec=None, manifold_eig_vals=None):
        """RLS-like training of J driven by a conditioned-neuron (CN) error.

        Returns (r trajectory, tanh of it).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T/self.dt)
        self.P = np.eye(self.N, self.N)*0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2*np.random.randn(self.N)-1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0 # remember to give previous trial r0 to the network
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0,:] = self.r
        for i in range(time_steps-1):
            """
            abcdefghijklmnopqrstuvwxyz
            """
            # NOTE(review): record_r[-1, ...] reads the *last* row, which is
            # still all zeros until the final iteration — record_r[i, ...]
            # (the current step) was probably intended; confirm.
            if day_id==0:
                error_val = self.b*(record_r[-1, conditioned_neuron] - np.mean(record_r[-1, :]))
            else:
                error_val = self.b*(record_r[-1, conditioned_neuron] - record_r[-1, :]@manifold_eig_vec[:, manifold_eig_vals.argmax()])
            # this looks good except only 1 max eig_vec is taken, i.e only the first dimension. This is something like
            # learning vector. ask kayvon
            # error = b*(CN_today(t) - r(t)*Manifold_yesterday) for day 1:x
            # for day 0, we can keep it
            # error = b*(CN_today(t) - average_activity(t-1)). The difference has to be high to compensate for small b value??
            # print(error_val)
            # print(self.W_fb.shape)
            self.error = self.W_fb*error_val
            if i%2 == 0:
                # recursive least-squares update of P and J every other step
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P)/(1+np.dot(self.r, Pr))
                self.e_minus = self.error
                self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
                self.J -= self.dw
            self.step(ext[i])
            record_r[i+1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)
    def participation_ratio(self, eig_vals):
        """(sum λ)^2 / sum(λ^2) — effective dimensionality of a spectrum."""
        return (np.sum(eig_vals.real)**2)/(np.sum(eig_vals.real**2))
    def calculate_manifold(self, T, trials, I, pulse_end):
        """PCA of post-pulse activity pooled over `trials` simulations.

        Returns (projected activity, activity, eig_vals, eig_vecs, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps-pulse_end
        activity = np.zeros((trials*npoints,self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            # keep only activity after the input pulse has ended
            activity[i*npoints:(i+1)*npoints, :] = z_simulation[pulse_end:, :]
            # print(f"{i+1} completed")
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Build a pulse of length int(T/dt): `amplitude` over [start, end), zero elsewhere."""
    n = int(T / dt)
    assert end <= n
    out = np.zeros(n)
    out[start:end] = amplitude
    return out
def initialize_network():
    """Build an RNN from the module-level hyper-parameters, run one trial,
    record its manifold in dict_manifold, and pick a conditioned neuron
    among the 10 highest-firing units of the first 100 time steps."""
    net = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    net.add_input(I)
    # one unperturbed trial to characterise the baseline dynamics
    _, r_sim = net.simulate(T, ext=None)
    dict_manifold.append(net.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # argsort is ascending, so the highest-firing units sit at the end
    peak_rates = np.max(r_sim[:100, :], axis=0)
    cn = np.random.choice(peak_rates.argsort()[-10:])
    print(cn)
    return net, r_sim, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered dynamics, print the participation ratio,
    and return the ordered activity plus the conditioned neuron's new index."""
    rates = np.tanh(r_simulation)
    putils.plot_dynamics(rates, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        rates, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    # print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one day of feedback learning (warm-started from the previous
    trial's final state) and append the post-learning manifold."""
    prev_eig_vecs = dict_manifold[-1][3]
    prev_eig_vals = dict_manifold[-1][2]
    r_learn, _ = network.learning(T, None, conditioned_neuron=cn,
                                  r0=r_simulation[-1], day_id=day_id,
                                  manifold_eig_vec=prev_eig_vecs,
                                  manifold_eig_vals=prev_eig_vals)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# --- experiment hyper-parameters (mirror the RNN constructor defaults) ---
N = 500      # number of neurons
g = 1.5      # recurrent gain; > 1 puts the network in the chaotic regime
p = 0.1      # connection probability
tau = 0.1    # neuron time constant
dt = 0.01    # integration time step
N_in = 2     # number of input channels
T = 5        # trial duration
n_days = 4           # number of learning days to simulate
dict_manifold = []   # one calculate_manifold(...) result tuple per stage
print(dict_manifold)
pulse_amplitude = 1  # input pulse height
pulse_start = 10     # pulse onset, in time steps
pulse_end = 30       # pulse offset, in time steps
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
r_learn = r_simulation
for i in range(n_days):
    # NOTE(review): dict_manifold[i] is the manifold measured *before* this
    # day's learning (the day appends index i+1) — confirm which is intended.
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 135
Participation Ratio: 5.435180593940206 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.435180593940206 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.586490550350377 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.843524599577646 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.801200582705043
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
"""
Class to implement a general RNN Model
Parameters
---------------
N = number of parameters
g = gain constant of the network. g>1.0 is chaotic regime
p = connection probability
tau = neuron time constant
dt = simulation time constant
N_input = number of input units. 1 for sound, 1 for lickport starts moving
N_out = number of output units, 1 in our case which drives the lickport
"""
def __init__(self, N=500, g=1.5, p=0.1,
tau=0.1, dt=0.01, N_input=2,
N_out=1, T=1, b=0.01):
self.N = N
self.g = g
self.p = p
self.tau = tau
self.dt = dt
self.N_input = N_input
self.N_out = N_out
self.b = b
# Make the J matrix
mask = np.random.rand(self.N,self.N)<self.p
np.fill_diagonal(mask,np.zeros(self.N))
self.mask = mask
self.J = self.g / np.sqrt(self.p*self.N) * np.random.randn(self.N,self.N) * mask
self.W_in = 2*np.random.randn(self.N, self.N_input) - 1
self.W_out = 2*np.random.randn(self.N_out, self.N) - 1
self.W_fb = 2*np.random.randn(self.N, 1) - 1
def step(self, ext):
# print(f"{np.dot(self.J, self.z).shape}, {np.dot(self.W_in, ext.T).shape}")
self.r = self.r + \
self.dt/self.tau * \
(-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
self.z = np.tanh(self.r)
def add_input(self, I, plot=False):
self.ext = np.zeros((int(T/dt), self.N_input))
if I.shape[-1] == 1:
self.ext[:, 0] = I
else:
self.ext = I
if plot:
plt.plot(self.ext)
plt.show()
return self.ext
def simulate(self, T, ext, r0=None):
time = np.arange(0, T, self.dt)
time_steps = len(time)
if r0 is None:
r0 = 2*np.random.randn(self.N)-1.
if ext is None:
ext = np.zeros((time_steps, self.N_input))
self.ext = ext
self.r = r0
self.z = np.tanh(self.r)
#simulation for time_step steps
record_r = np.zeros((time_steps,self.N))
record_r[0,:] = self.r
for i in range(time_steps-1):
# print(ext[i].shape)
self.step(self.ext[i])
record_r[i+1, :] = self.r
return self.z, record_r
def initialize_cursor(self, cursor_distance_initial):
"""
cursor == lickport
everything in m/s
cursor_velocity must be dependent on CN activity but right now we just let it be constant
"""
self.cursor_velocity = 0.05
self.cursor_distance = cursor_distance_initial
self.cursor_distance_initial = cursor_distance_initial
def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None, manifold_eig_vec=None, manifold_eig_vals=None):
self.conditioned_neuron = conditioned_neuron
self.current_day_id = day_id
self.initialize_cursor(1)
time_steps = int(T/self.dt)
self.P = np.eye(self.N, self.N)*0.05
if r0 is None:
r0 = 2*np.random.randn(self.N)-1.
if ext is None:
ext = np.zeros((time_steps, self.N_input))
self.ext = ext
self.r = r0 # remember to give previous trial r0 to the network
self.z = np.tanh(self.r)
record_r = np.zeros((time_steps, self.N))
record_r[0,:] = self.r
for i in range(time_steps-1):
"""
abcdefghijklmnopqrstuvwxyz
"""
if day_id==0:
error_val = self.b*(record_r[-1, conditioned_neuron] - np.mean(record_r[-1, :]))
else:
error_val = self.b*(record_r[-1, conditioned_neuron] - record_r[-1, :]@manifold_eig_vec[:, manifold_eig_vals.argmax()])
# this looks good except only 1 max eig_vec is taken, i.e only the first dimension. This is something like
# learning vector. ask kayvon
# error = b*(CN_today(t) - r(t)*Manifold_yesterday) for day 1:x
# for day 0, we can keep it
# error = b*(CN_today(t) - average_activity(t-1)). The difference has to be high to compensate for small b value??
# print(error_val)
# print(self.W_fb.shape)
self.error = self.W_fb*error_val
if i%2 == 0:
Pr = np.dot(self.P, self.r)
self.P -= np.outer(Pr, self.r).dot(self.P)/(1+np.dot(self.r, Pr))
self.e_minus = self.error
self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
self.J -= self.dw
self.step(ext[i])
record_r[i+1, :] = self.r
if self.z[self.conditioned_neuron] >= 0.3:
self.cursor_distance -= self.cursor_velocity
return record_r, np.tanh(record_r)
def participation_ratio(self, eig_vals):
return (np.sum(eig_vals.real)**2)/(np.sum(eig_vals.real**2))
def calculate_manifold(self, T, trials, I, pulse_end):
time_steps = I.shape[0]
ext = np.zeros((time_steps, self.N_input))
ext[:, 0] = I
npoints = time_steps-pulse_end
activity = np.zeros((trials*npoints,self.N))
for i in range(trials):
z_end, r_simulation = self.simulate(T, ext=ext)
z_simulation = np.tanh(r_simulation)
activity[i*npoints:(i+1)*npoints, :] = z_simulation[pulse_end:, :]
# print(f"{i+1} completed")
print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
cov = np.cov(activity.T)
eig_val, eig_vec = np.linalg.eig(cov)
pr = self.participation_ratio(eig_val)
activity_manifold = activity @ eig_vec
return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
time_steps = int(T/dt)
wave = np.zeros(time_steps)
assert(end <= time_steps)
wave[start:end] = amplitude
return wave
def initialize_network():
# initialize the network
network = RNN(N=N,g=g,p=p,tau=tau,dt=dt,N_input=N_in, T=T)
network.add_input(I)
# simulate the network for T time and find the manifold, eig_vals etc
z_end, r_simulation = network.simulate(T, ext=None)
dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
# choose a conditioned neuron as one of the top 10 firing neurons
cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
print(cn)
return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
# plot dynamics of network during simulation, ordered and unordered. Also calculate the PR, 90% cutoff var.
putils.plot_dynamics(np.tanh(r_simulation), cn=cn)
sorted_array, cn_new_idx = putils.plot_dynamics_ordered(np.tanh(r_simulation), criteria="max_initial", sort="descending", cn=cn)
print(f"Participation Ratio: {pr}")
# print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
# train the network with our learning rule. calculate manifold, eig_vals etc
r_learn, z_learn = network.learning(T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id, manifold_eig_vec=dict_manifold[-1][3], manifold_eig_vals=dict_manifold[-1][2])
dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
return r_learn
N = 500
g = 1.5
p = 0.1
tau = 0.1
dt = 0.01
N_in = 2
T = 5
n_days = 4
dict_manifold = []
print(dict_manifold)
pulse_amplitude = 1
pulse_start = 10
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
r_learn = r_simulation
for i in range(n_days):
r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 321
Participation Ratio: 5.403614314426269 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.403614314426269 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.4370867982861055 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.954611403038912 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.343379126089124
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
from distutils.log import error
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Rate-based recurrent neural network with sparse random connectivity.

    Parameters
    ----------
    N : int
        Number of neurons.
    g : float
        Gain of the recurrent weights; g > 1.0 puts the network in the
        chaotic regime.
    p : float
        Connection probability of the recurrent matrix J.
    tau : float
        Neuron time constant.
    dt : float
        Simulation time step.
    N_input : int
        Number of input units (1 for sound, 1 for "lickport starts moving").
    N_out : int
        Number of output units (1 here: drives the lickport).
    T : float
        Default trial duration, used by add_input.
    b : float
        Scale factor on the feedback learning error.

    Fixes vs. previous revision: removed the per-step debug print from the
    training loop (it flooded stdout once per time step); add_input now uses
    self.T/self.dt instead of module globals and detects 1-D input with
    I.ndim (the old I.shape[-1] == 1 test never matched a real pulse).
    """
    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.T = T  # stored so methods don't depend on module globals
        self.b = b
        # Sparse recurrent matrix: keep each off-diagonal entry with prob p.
        mask = np.random.rand(self.N, self.N) < self.p
        np.fill_diagonal(mask, 0)  # no self-connections
        self.mask = mask
        # Scaled by g/sqrt(p*N) so the spectral radius of J is ~g.
        self.J = self.g / np.sqrt(self.p * self.N) * np.random.randn(self.N, self.N) * mask
        # NOTE(review): 2*randn-1 is a shifted Gaussian, not uniform in
        # [-1, 1]; if uniform weights were intended, use 2*rand-1 — confirm.
        self.W_in = 2 * np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2 * np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2 * np.random.randn(self.N, 1) - 1
    def step(self, ext):
        """One Euler step of tau*r' = -r + J z + W_in ext; z = tanh(r)."""
        self.r = self.r + \
            self.dt / self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)
    def add_input(self, I, plot=False):
        """Store external input I into a (time_steps, N_input) buffer.

        A 1-D I is placed on input channel 0 with the remaining channels
        zero; a 2-D I is used as-is. Returns the stored buffer.
        """
        self.ext = np.zeros((int(self.T / self.dt), self.N_input))
        if I.ndim == 1:
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext
    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds.

        ext is a (time_steps, N_input) input array (zeros when None);
        r0 is the initial state (random when None).
        Returns (final z, recorded r trajectory of shape (time_steps, N)).
        """
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        # record the firing-rate trajectory, one row per time step
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            self.step(self.ext[i])
            record_r[i + 1, :] = self.r
        return self.z, record_r
    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport; everything in m/s.
        cursor_velocity should eventually depend on CN activity, but for
        now it is a constant.
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial
    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None,
                 manifold_eig_vec=None, manifold_eig_vals=None):
        """Train J with an RLS-like rule driven by a conditioned-neuron error.

        On day 0 the error target is the mean population activity; on later
        days it is the projection of the activity onto the previous day's
        leading manifold eigenvector ("learning vector" — only the single
        largest eigenvector is used).
        Returns (r trajectory, tanh of it).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T / self.dt)
        self.P = np.eye(self.N, self.N) * 0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0  # carry over the previous trial's final state when given
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            # error = b*(CN activity - target) at the current step
            if day_id == 0:
                error_val = self.b * (record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
            else:
                error_val = self.b * (record_r[i, conditioned_neuron] -
                                      record_r[i, :] @ manifold_eig_vec[:, manifold_eig_vals.argmax()])
            self.error = self.W_fb * error_val
            if i % 2 == 0:
                # recursive least-squares update of P and J every other step
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P) / (1 + np.dot(self.r, Pr))
                self.e_minus = self.error
                self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
                self.J -= self.dw
            self.step(ext[i])
            record_r[i + 1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)
    def participation_ratio(self, eig_vals):
        """(sum λ)^2 / sum(λ^2) — effective dimensionality of a spectrum."""
        return (np.sum(eig_vals.real) ** 2) / (np.sum(eig_vals.real ** 2))
    def calculate_manifold(self, T, trials, I, pulse_end):
        """PCA of post-pulse activity pooled over `trials` simulations.

        Returns (projected activity, activity, eig_vals, eig_vecs, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps - pulse_end
        activity = np.zeros((trials * npoints, self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            # keep only the activity after the input pulse has ended
            activity[i * npoints:(i + 1) * npoints, :] = z_simulation[pulse_end:, :]
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Pulse of length int(T/dt): value `amplitude` on [start, end), 0 elsewhere."""
    total = int(T / dt)
    assert end <= total
    signal = np.zeros(total)
    signal[start:end] = amplitude
    return signal
def initialize_network():
    """Build an RNN from the module-level hyper-parameters, run one trial,
    record its baseline manifold, and pick a conditioned neuron among the
    10 *highest*-firing units of the first 100 time steps.

    Bug fix: argsort is ascending, so the old `argsort()[:10]` selected the
    ten lowest-firing neurons; `[-10:]` gives the top 10 the comment asks for
    (matching the earlier revision of this function).
    """
    # initialize the network
    network = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    network.add_input(I)
    # simulate the network for T time and find the manifold, eig_vals etc
    z_end, r_simulation = network.simulate(T, ext=None)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # choose a conditioned neuron as one of the top 10 firing neurons;
    # argsort is ascending, so the highest peaks are at the end
    cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
    print(cn)
    return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered dynamics, print the participation ratio,
    and return the ordered activity plus the conditioned neuron's new index."""
    firing = np.tanh(r_simulation)
    putils.plot_dynamics(firing, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        firing, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    # print(np.where(np.cumsum(eig_val.real)/np.sum(eig_val.real)>0.9)[0][0])
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one 'day' of feedback learning and record the resulting manifold.

    Trains the network with its learning rule, seeding from the final state
    of the previous trajectory (r_simulation[-1]) and passing the previous
    stage's manifold eigenvectors/eigenvalues (dict_manifold[-1][3] / [2]).
    Afterwards appends the post-learning manifold to the global dict_manifold.

    NOTE(review): the `input` parameter is accepted but never used (and it
    shadows the builtin). Callers pass input=I, yet `learning` receives None
    as its second argument -- confirm whether the pulse should be fed in
    during learning.

    Reads module-level globals T, I, pulse_end. Returns the learned rate
    trajectory r_learn.
    """
    # train the network with our learning rule. calculate manifold, eig_vals etc
    r_learn, z_learn = network.learning(T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id, manifold_eig_vec=dict_manifold[-1][3], manifold_eig_vals=dict_manifold[-1][2])
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# ---- Experiment hyperparameters (read as globals by the helpers above) ----
N = 500        # number of neurons
g = 1.5        # gain; g > 1 puts the network in the chaotic regime
p = 0.1        # connection probability
tau = 0.1      # neuron time constant
dt = 0.01      # simulation time step
N_in = 2       # input channels (sound + lickport)
T = 5          # simulation duration per stage
n_days = 3     # number of learning "days"
# One manifold tuple per stage: index 0 = baseline, index d = after day d.
dict_manifold = []
print(dict_manifold)
pulse_amplitude = 1
pulse_start = 10   # pulse onset/offset in time-step indices
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
# Baseline: build the network, simulate once, record manifold, pick CN.
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
# print(r_simulation[-1])
r_learn = r_simulation
for i in range(n_days):
    # Each day continues from the previous day's final trajectory.
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    # NOTE(review): dict_manifold[i] is the manifold from BEFORE this day's
    # learning; the manifold appended by simulate_day is dict_manifold[i+1].
    # Confirm which participation ratio is intended in this plot.
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 303
Participation Ratio: 10.409640964860152 -0.08535991148355412 0.1040875572526452 -0.08595904567483917 0.10074787555836635 -0.08657275460550938 0.0982949825395173 -0.08726299083307289 0.09674428325017395 -0.08796418132063076 0.09612644118785585 -0.0887340612600374 0.09641948956046649 -0.08951196984840477 0.0977051094180533 -0.09036034589302938 0.09998812412441078 -0.09121392317161578 0.10333883877058361 -0.09214344636115708 0.10785149087450657 -0.09307505024080129 0.1135234224729483 -0.09408884365623892 0.12060031442329894 -0.09510096323590761 0.1289438113931099 -0.09620012897870928 0.13900861824429167 -0.09729292215235369 0.15045975095774516 -0.09847459695370522 0.16400954717875446 -0.09964400652218158 0.17907088429386916 -0.10090016228314179 0.1966514879168443 -0.10213682732340783 0.21586296786182604 -0.10345396666844256 0.2380376498007988 -0.1047430792527407 0.2619398882605436 -0.10610274166319977 0.28925234699257557 -0.10742475133170572 0.31834345224382066 -0.1088045847444548 0.3512729015816697 -0.1101364832707376 0.38595732572174724 -0.11151163159604298 0.4248616380383752 -0.11282853114856611 0.46538856481742474 -0.1141731517277206 0.5104325392901373 -0.11544991391398139 0.556823530773021 -0.11673867802989377 0.6078941543804461 -0.11795138988592903 0.6598653816477653 -0.11916083760308728 0.7164843851578163 -0.12028798193497058 0.7733766987794799 -0.12139763110942463 0.8346312620765951 -0.1224208626106698 0.8953688123396955 -0.12341395123277303 0.9598885045647445 -0.12431837029328606 1.0229881144203608 -0.12518196505488002 1.0889913473285395 -0.1259556483377533 1.1526351666817354 -0.12667971718290472 1.2180484649596486 -0.1273122261322676 1.2802136111562004 -0.12788745229821966 1.3428420193312498 -0.12836736491601838 1.4014630347476158 -0.12878210242651192 1.459175613329923 -0.12909431091292942 1.5123086731596018 -0.12933190865960392 1.5632064962492407 -0.12945611220930364 1.609171594625457 -0.1294942874668009 1.6517219815279578 -0.12940611662903714 
1.6892151719568098 -0.12921951957022423 1.7223521715983319 -0.12889465444468828 1.7505316789273024 -0.12846021166208027 1.7737267829422865 -0.12788015125177982 1.7922745020384105 -0.12718305981898914 1.8055722093246156 -0.1263399847086105 1.8147192493732551 -0.12537756415102952 1.8187214349711074 -0.12427600205217644 1.819217599958292 -0.12305767792837304 1.8150025470243363 -0.12171242894972636 1.808017696702755 -0.1202562835611153 1.796997703607518 -0.11868803316274126 1.783966773822293 -0.11701598171683568 1.767711799262043 -0.11524677414895586 1.7501586923255739 -0.11338044368852082 1.7302255561606852 -0.11143048848973869 1.709606510680607 -0.10938876284873902 1.6874055411557398 -0.10727488204008427 1.6649977502595699 -0.10507300864025315 1.6417091012668583 -0.10280834061461917 1.6185487938252834 -0.1004580975987385 1.5950833808193947 -0.09805260434038543 1.5719435621703448 -0.09556313621074201 1.548936675770571 -0.09302466751793542 1.5263320117248327 -0.0904038290071214 1.5041592888085138 -0.08773968877204759 1.4823692401945459 -0.08499588192774318 1.4611787565458374 -0.08221490516621069 1.4402838396977724 -0.07935937241727042 1.420040256472328 -0.07647439030461879 1.3999671488280647 -0.07352367899690268 1.3805028879127508 -0.07055389324767593 1.3610721729046102 -0.06753178888825037 1.3421377468098827 -0.06450416234166371 1.3231058426118698 -0.06144211405164088 1.304409959717728 -0.058390838445835834 1.2854980825176368 -0.05532621047293562 1.2667321694035059 -0.05228995490644914 1.2476426174374247 -0.04926234707942512 1.2284944961298703 -0.04628000254089565 1.208924302927073 -0.04332676824466241 1.189094220401958 -0.04043305391161512 1.1687587177573704 -0.03758540584005058 1.1479881457493475 -0.03480762721529326 1.1266578868582784 -0.032088325239541006 1.104768527452231 -0.02944496191191073 1.0823094648750091 -0.02686786981990473 1.059237234573788 -0.024368944997040662 1.0356361000893257 -0.02194013158385905 1.0114415113606197 -0.01958884090849819 
0.9868013948249755 -0.01730860692954492 0.9616468450767834 -0.015103535318769899 0.9361516944827518 -0.01296870412740328 0.9102532882125777 -0.010905995948178506 0.8841162309960727 -0.008911884112818761 0.857690832250282 -0.006986866433215733 0.8311080682709957 -0.005128531381325899 0.8043361513596384 -0.0033365244863478626 0.7774622165153855 -0.001609168311268 0.7504764236264537 5.4481606705948375e-05 0.7234245350713764 0.0016557750571397024 0.696321587285926 0.003196157544124748 0.6691825842135766 0.0046769997651338715 0.6420485659823405 0.00610023235546351 0.6149173686047065 0.007467482834309968 0.5878548060271961 0.008781158881137705 0.5608540412763964 0.01004325332852423 0.5340019266184999 0.011256624320975987 0.5072960223086614 0.012423625226162827 0.4808381587605249 0.013547516862557521 0.4546351156805406 0.014630922393134065 0.4287956412371332 0.015677443102788412 0.4033364426196445 0.01668982652534862 0.3783635541704676 0.017671948061874998 0.35390086752550104 0.01862651103941709 0.3300411297681367 0.019557605923881814 0.3068101954093078 0.020467728367997196 0.2842769278888014 0.021361141709231344 0.2624626677809574 0.022239996215821546 0.24140302752138595 0.023108707346538717 0.22110859839621871 0.02396897894788376 0.20157514348325456 0.024825378318839353 0.1827981727896737 0.025679101459541245 0.16473130413757742 0.026534882955781814 0.1473539245528266 0.02739338751274269 0.13058045332083926 0.028259541149604932 0.11437687981589231 0.02913348707764266 0.09862638091989487 0.030020365358329696 0.08328722379454567 0.030919831449230346 0.06822274895611294 0.0318372448441525 0.053390213286839384 0.032771815594400365 0.03864551456698198 0.033729089997716295 0.023950348927246425 0.034707865535233966 0.009163992532824253 0.035713784500164006 -0.005745021778254535 0.03674524355156244 -0.02090660222310175 0.03780782205291705 -0.036346604531097455 0.03889951271141545 -0.052180581210261694 0.04002561269247793 -0.06843275525014497 0.0411837217994039 
-0.0852039069438167 0.042378599350011 -0.10252146433952532 0.04360752101182795 -0.12047269322338765 0.04487444304154708 -0.13909189461101087 0.04617647958883388 -0.15845452320315334 0.04751651909475424 -0.17860298326443358 0.048891774062562274 -0.19960272397399365 0.05030378061013464 -0.22150296057748697 0.05175016437017976 -0.24436168215221576 0.0532307593127323 -0.2682320074975518 0.054743896837769354 -0.29316896468759973 0.056287250588498344 -0.31922636748492456 0.057860053166005336 -0.3464639053916428 0.0594572562822618 -0.37493360146746096 0.06107906879566883 -0.40471109482832884 0.06271715089403278 -0.43584499041516783 0.06437270162496861 -0.46844144822539174 0.06603369651418059 -0.502543866177325 0.06770238760480375 -0.5383057333179853 0.06936304553550401 -0.5757612592676462 0.07101916362989966 -0.6151291676711043 0.07265177077963438 -0.6564250237136664 0.07426592761047231 -0.6999499981295811 0.07584038391366595 -0.7456794598764133 0.07738222430069353 -0.7940083144323593 0.07886928077244469 -0.8448257840389216 0.08031104390065733 -0.898616390747561 0.08168566914186799 -0.9551013563770657 0.083004667604513 -1.0148312408578786 0.08424688203501948 -1.0772499674047458 0.08542443265168702 -1.142944542744944 0.08651662648909042 -1.2109821024974077 0.08753489946646699 -1.281974511856403 0.08845918474561376 -1.3545704440201911 0.08930005800895083 -1.4294278899175885 0.09003863373018048 -1.504822661150044 0.09068543088566677 -1.5815030570828223 0.09122316375944729 -1.6574954257416201 0.09166362525950535 -1.733673545521603 0.09199124167370004 -1.807974285211301 0.09222053903513434 -1.8813937311581916 0.09233750018358071 -1.9519300287131836 0.09236045145304249 -2.020654320053036 0.09227642176556408 -2.08573711603909 0.09210769958259263 -2.1482488919829876 0.09184139619683732 -2.206591181941968 0.0915029101421992 -2.261749444068786 0.09107819387387345 -2.312362046021909 0.09059431968879075 -2.3592498236895603 0.09003504095101579 -2.401254279611208 0.08942775462498742 
-2.4389705846004017 0.08875369065683915 -2.471403645159708 0.08803974115277473 -2.498904460037935 0.08726512049016817 -2.520640429297442 0.0864557201628231 -2.5367664508537167 0.08558956123512039 -2.546677449355151 0.08469076033155122 -2.5504470213008625 0.08373665434320927 -2.547809808538635 0.0827483025844175 -2.5388986789341335 0.08170263434607947 -2.523875624781227 0.08061641902037732 -2.5030265015991198 0.07946689688619746 -2.4769199037663263 0.07826606325170295 -2.4459923942969275 0.07699292077927687 -2.4110728433035686 0.07565502573194771 -2.372673519045125 0.07423481925220397 -2.331681171621674 0.07273617172193399 -2.2885990410184682 0.0711461462146972 -2.2442001483640195 0.06946579574725757 -2.198921776341108 0.06768712594902696 -2.1533263061632075 0.06580927659549303 -2.1077546532522575 0.06382863746346296 -2.0625381602260258 0.06174334685924334 -2.017902784721979 0.05955292680058655 -1.973987704582765 0.05725538353558145 -1.930887919113295 0.05485174476406732 -1.8886255677877881 0.05234066762335894 -1.8471507742778777 0.04972308795144839 -1.8064560600452084 0.046999000789630666 -1.7663401693084457 0.044168100527056886 -1.7268442532302806 0.04123234570842914 -1.687624476828771 0.03818984809224944 -1.6488238764097192 0.035045148478072434 -1.6099875125723233 0.031795448650541826 -1.5713793680240822 0.02844845335016636 -1.5324900646659707 0.02500202368309008 -1.4936846628441074 0.021467280605387624 -1.4544729949766104 0.017844593599236125 -1.4152604693624384 0.014147825071842007 -1.3756455751842371 0.010380964010012712 -1.335978473188118 0.006558491408077419 -1.2959860251350317 0.0026875731206781393 -1.255854697905202 -0.0012200338145373357 -1.2154358831905785 -0.005155836857550175 -1.1746654619392467 -0.009114117092051417 -1.133480835072721 -0.013087047571688997 -1.0915334599952098 -0.017076587214075477 -1.048803256068806 -0.021076095206283067 -1.004692296685091 -0.025094220800193304 -0.9592067649226514 -0.02912361671650868 -0.9115981198252172 
-0.03317623039973254 -0.8619227780628624 -0.03723978293327311 -0.8094243985298148 -0.04132486566275305 -0.7542655242560785 -0.045408988614984214 -0.6958468727563418 -0.04949761203473935 -0.6345066678251736 -0.053554550473887706 -0.5699663860480699 -0.05757999905075735 -0.5028030845612039 -0.0615253530153481 -0.43318171873989886 -0.06538990117807061 -0.36194203524479157 -0.06911851104017908 -0.28969460409824077 -0.07271572070213582 -0.21747842787471466 -0.07612684238732356 -0.1461670954516131 -0.0793657932512762 -0.07686330931361385 -0.08238278779672015 -0.010403051134852043 -0.08520165963125095 0.05216005104724411 -0.08777891155707769 0.11030468117880435 -0.09014669142830065 0.16307132970069801 -0.09226791335839256 0.21042576839711888 -0.09418095806020105 0.251510219068902 -0.09585499766429972 0.2868239484978348 -0.09733213556819134 0.3156117633014951 -0.09858712280115656 0.33883969190562396 -0.09966262537272375 0.35586741309982417 -0.10053775326996046 0.3679938239804405 -0.10125244724055649 0.3747260006093069 -0.10178895545268861 0.3775405365755153 -0.10218190399249916 0.3761281478017844 -0.10241600282189744 0.37198519572438826 -0.10251918971488243 0.36499342249217526 -0.1024786872886569 0.3565158423685422 -0.10231563949975649 0.34658854940574757 -0.10202037808412548 0.3363151425546338 -0.10160808629919908 0.32582245846208635 -0.10107288696768009 0.3158888060476124 -0.10042518682157699 0.3066725089378395 -0.09966313396313306 0.2986377978893713 -0.09879334838929443 0.29193653386648927 -0.09781756597122866 0.2867901942374637 -0.09673926124514512 0.2833220621062575 -0.09556294600889892 0.28160518696336956 -0.09428952769560228 0.2817159883025734 -0.09292568822843202 0.2836662429531068 -0.09147064259351902 0.2874672164681413 -0.08993312280903437 0.29313593813502087 -0.08831171184393459 0.3006030923241316 -0.08661714036630007 0.3099308134466855 -0.08484798378912778 0.32095880778511143 -0.08301637400188781 0.3338032260397424 -0.08112075078720007 0.3482097904534134 
-0.07917362030063592 0.36433216505819327 -0.07717289341433822 0.3818329893457848 -0.07513068510164972 0.40087954443955837 -0.0730442983118389 0.42108153571877044 -0.07092526778500324 0.4426059233475563 -0.06877053342099783 0.4650546337391064 -0.06659117960303186 0.4885940108737353 -0.06438405282727042 0.5128635979264309 -0.06215988139035682 0.5380321358725358 -0.059915502487397194 0.5638036504568663 -0.05766113048091098 0.5903412772102872 -0.0553933959298646 0.6174092864469986 -0.05312150760071643 0.6451397765850071 -0.050841441722598274 0.6733236525574438 -0.04856074723823741 0.7020261517334632 -0.04627435368163796 0.731018791266055 -0.043987822188716316 0.7602713884829028 -0.0416951869807105 0.7895027594463455 -0.039400448864235424 0.8185847238387776 -0.03709759013796805 0.84718230713137 -0.03479001925969284 0.8750993927178289 -0.032472656495045736 0.9019792669174437 -0.030149012491473073 0.9276087690201971 -0.027815286649397263 0.9516571952196252 -0.025474870919010885 0.9739439214291815 -0.023124642092580895 0.9942053153970458 -0.020766839435269573 1.012323686217809 -0.018397879685963153 1.0281226464734263 -0.016017676845844462 1.0415531606709094 -0.01362121417175981 1.0525234726571049 -0.011205450070775358 1.0610398115304234 -0.00876362548057452 1.0670769777386713 -0.006289962599311334 1.0706746890308543 -0.003776436147288742 1.0718517399795193 -0.001215522144842951 1.0706613602686865 0.001400995955206077 1.0671489559691363 0.004080946276419574 1.0613695304073438 0.006831423408621615 1.053388851332301 0.009659049130032278 1.0432626031226413 0.012568588270471104 1.0310842641679303 0.015564089301453655 1.016919188132549 0.018646961312050698 1.000909176783488 0.021817312155256702 0.9831447120460813 0.025072092465682278 0.9638451084511345 0.028405766944939098 0.9431386124321649 0.031809452337975365 0.9213466434973786 0.03527005830050348 0.8986275048630281 0.03877182469283667 0.8754029021948202 0.04229318870130799 0.8518154499972741 0.04581228155875805 
0.8283443335406196 0.04930068867047645 0.8050388630722807 0.05273374584626179 0.7823672104219672 0.05608024968516482 0.7602164371255467 0.0593171870666488 0.738985067363538 0.06241516648809041 0.7183794149684117 0.0653562489422371 0.6986976116770973 0.06811631401709196 0.6795043390818339 0.07068396493421465 0.6609874128219185 0.07304222435550331 0.6426466907972063 0.07518608546261961 0.624562545465138 0.07710626440779154 0.6062594323261173 0.07880313878596819 0.5877259558682717 0.08027475535115401 0.5685888163765919 0.08152568922185095 0.5487718999566125 0.08256038577457366 0.5280482515400122 0.083386624271404 0.5063082711486231 0.08401396387619121 0.4834708438904017 0.0844526192942293 0.4594202727475736 0.08471570309054022 0.4341823514783644 0.08481510218303255 0.4076553620168228 0.08476566807454805 0.37991548239552353 0.08457996684073742 0.35088965784396375 0.08427263913429364 0.3206550951374123 0.08385569817760438 0.2891805630184562 0.0833417934034186 0.2565183106037553 0.08274120070928499 0.22269158844744114 0.08206337939092176 0.18772585171500702 0.08131614196421066 0.1517069976550992 0.08050542675825066 0.1146443175372104 0.07963653902765594 0.07668394535650323 0.07871236789316206 0.03782834055081698 0.07773619682244368 -0.0017314317463498693 0.07670873948429262 -0.04199807817618272 0.07563187864363936 -0.08275927483519746 0.07450498179922839 -0.12402633542179989 0.07332898287520127 -0.16558879371493673 0.07210248727378382 -0.20746887477424247 0.07082574117464774 -0.24947210557819913 0.06949699841815118 -0.29162842629175617 0.06811604259689022 -0.3337598205260304 0.06668113247921775 -0.37589503001144847 0.0651918555473525 -0.4178594672010901 0.06364673689850424 -0.4596698246163661 0.062045364735434705 -0.5011333957888806 0.0603865296326768 -0.5422480527457053 0.058669792992785526 -0.5827806995688137 0.05689388729690293 -0.6227126974294902 0.05505813265618741 -0.6617547666253203 0.05316077636417015 -0.6998845245129837 0.0512007824589808 -0.7367515327342552 
0.049175775520351676 -0.7723506551238541 0.047084657581612184 -0.8062774801728672 0.04492481425567809 -0.8385686262803229 0.042695777061054145 -0.868783442398588 0.04039536416747562 -0.8970210412282463 0.0380243110123565 -0.922827507609168 0.035581134272714816 -0.9463716333610173 0.03306746329335296 -0.9672067070977969 0.030481750064769577 -0.9855575033005876 0.027825056208299302 -1.0009987441413395 0.025094209650877906 -1.0137808630303107 0.02228787925370817 -1.0235146550084553 0.01940004754976509 -1.030446022217969 0.016426083417484142 -1.0342484878387457 0.013357283638035539 -1.0351479940036772 0.010186404609487361 -1.0329216258148644 0.0069037841952794814 -1.0277729029040845 0.00350169328389508 -1.0196259164185504 -2.7499680083291622e-05 -1.0086594892971088 -0.0036886529665267833 -0.9949790301369646 -0.007483034445752759 -0.97873068237636 -0.011407714822983002 -0.9602240756362902 -0.015451581775222994 -0.9395582933576249 -0.019597725834101316 -0.9172461761451656 -0.023816709662974035 -0.8933027614183366 -0.028074121978565684 -0.8683872106320633 -0.03232276004477605 -0.8423616653619919 -0.03651692442153747 -0.8159164632415009 -0.040603726641195464 -0.7887013545353483 -0.04454087299515329 -0.7613160366357009 -0.04828539622617339 -0.7332116182556818 -0.051809097221264415 -0.7048329398960477 -0.05508624690854038 -0.6755421706100244 -0.05810292747914722 -0.6456383518117815 -0.06084881894961838 -0.6145465277362777 -0.06331835235785108 -0.5824560597766711 -0.06551139536145295 -0.548971976216621 -0.06742474833531016 -0.5141949421610194 -0.06906420773739966 -0.47796069777437566 -0.07042583839788805 -0.44028404222979334 -0.07151940922866384 -0.4012261464912725 -0.07234010594653144 -0.36071790524051706 -0.07290137091903363 -0.3190091081088155 -0.07319894829905703 -0.27596328665007447 -0.0732500604443052 -0.23197164533365985 -0.07305261282307117 -0.18686318840955654 -0.07262728182532188 -0.14112328531444931 -0.07197504521962297 -0.09458635574717736 -0.07111888833674543 
-0.04778938176830134 -0.07006269337299953 -0.0006112484107558894 -0.06882982381916274 0.04639260463121919 -0.06742603524805554 0.09326480043938123 -0.06587291923116814 0.1394542742929683 -0.06417691157738512 0.18490171739078654 -0.06235619042943068 0.22907908712075178 -0.06041711579877602 0.2718215236015508 -0.05837368993567151 0.3126487816109077 -0.05623199481620016 0.3513167784079271 -0.05400191759741171 0.38743123973566757 -0.05168941177620146 0.42072787172446396 -0.04930094652976796 0.45094463391022377 -0.04684267256121077 0.47787385017183387 -0.04431883311163118 0.5014187669611014 -0.041736209038427376 0.521491906414782 -0.039098288615307446 0.5381581198296904 -0.036412886746460524 0.5514734429063308 -0.03368403675893165 0.5616209554274871 -0.030920638554105997 0.5687802370708128 -0.028127804825474556 0.5731909700373314 -0.02531484157397702 0.5751097105462974 -0.022487308743942426 0.5747774295082593 -0.019653399281699922 0.5724720950057939 -0.016817488916486524 0.5683965403711956 -0.013984776710950473 0.5627961157006345 -0.011156755753809477 0.5558100805631925 -0.008334513930174202 0.5476074923401087 -0.005516256227994452 0.5382520137289661 -0.002699657672604304 0.527813181187126 0.0001189084075462632 0.5162830613683137 0.0029430470425718305 0.503632212478296 0.005776140723850809 0.48979718870085825 0.008621359964118685 0.4746643781984374 0.011480943060278768 0.4581321033684284 0.014357453041979532 0.440020092636561 0.01725238528286896 0.42020237354878065 0.02016829545512584 0.3984504007967921 0.02310645906964665 0.3746299139398157 0.02606970884267541 0.34848649282906463 0.02905912926914736 0.319901535830721 0.03207770868490235 0.28862068408597913 0.035126188572359815 0.25457379970514515 0.038207563795155684 0.2175326373776335 0.04132206411400912 0.17751727556861663 0.04447214317201428 0.13435254266130953 0.04765639352677838 0.08820472859603576 0.05087436929373302 0.03898772074319038 0.054119565750739076 -0.01290931137214582 0.05738389109073569 
-0.06743332142667403 0.06065100379530782 -0.12388866966504303 0.06390131226166325 -0.18204173010744873 0.06710751180156788 -0.2408638051202936 0.07024061136668767 -0.29997112150320543 0.07326777632141634 -0.35811924769650033 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 10.409640964860152 0.08819071536481193 -0.4149069139457339 0.09619520696373453 -0.46928748510763735 0.10389135339137695 -0.5204367205600724 0.11130929442464123 -0.5682712781650773 0.11833993167585419 -0.611879702514366 0.1250477180062878 -0.6515517673339009 0.13131861312931936 -0.6864789352245387 0.13722168063355095 -0.7170991334186777 0.14266560495692346 -0.742933736483262 0.14770925416675693 -0.7644646634118518 0.15229857738745642 -0.7816306343679023 0.1564806496055351 -0.7949443414430579 0.16023797737408738 -0.8046976332818475 0.1636077291083858 -0.8114340320762232 0.16659926998233585 -0.8156492441089638 0.16924070972607003 -0.8178966736657746 0.17155607592056987 -0.8187143589890898 0.17356418021991835 -0.8186253777263761 0.17529333598785926 -0.8180896946490376 0.17675295247890663 -0.8175572445496273 0.1779691059284766 -0.817354340047812 0.17894280000385046 -0.8178269268106807 0.17969538435042715 -0.8191713629642143 0.18022162677076456 -0.8216135213378047 0.18053839026064605 -0.8252598528088535 0.18063668566331267 -0.8302067836266218 0.1805299853875718 -0.8365229314992384 0.1802074046250964 -0.8441631336022754 0.17967981670048222 -0.8532075272475503 0.1789355424853634 -0.8634513137949188 0.17798326755541646 -0.8750287416542989 0.17681132867628865 -0.887554326590826 0.17542660137393276 -0.9012536368339928 0.17381837592989213 -0.9155413458847322 0.17199221310146043 -0.9307676579480367 0.16993952220315098 -0.9461349814408211 0.16766506471118953 -0.9621467575669418 0.16516364586374446 -0.9777922837002334 0.16243953772159195 -0.993748576989519 0.1594920371449016 -1.0088065936443156 0.1563247069559949 -1.0238232417163582 0.15294182318637703 -1.037422536680502 0.1493452310864943 -1.050627837208456 0.14554376262052104 -1.0619410708426833 0.14153582814125984 -1.0725166793222938 0.13733355092715802 -1.0807866030147106 0.13292996455763686 -1.087982502025801 0.12833884623009964 -1.0925193796903057 0.12354646511997691 -1.0956426233384964 
0.11856706162625542 -1.0958020305834135 0.1133802645908518 -1.0941966755949808 0.10800100105812863 -1.0893658046994403 0.10240457868285631 -1.082418058669095 0.09660845741737176 -1.0720439785404696 0.09058739676793154 -1.0592336290212614 0.08436305249179103 -1.0428864668165145 0.07791226539608737 -1.0238434884807006 0.07125948103451105 -1.0012389500998498 0.06438315702068477 -0.9757184564964672 0.05730572259337024 -0.9466331160524846 0.05000517598555396 -0.9143924657188269 0.04249696967205718 -0.8785265422616264 0.034758266112670654 -0.8392161272269405 0.026795846035400528 -0.7961485011658664 0.018589286386854375 -0.7493553392669674 0.010139467035586786 -0.6986945654074309 0.0014353647947012306 -0.6441944101545001 -0.00752115527098163 -0.5859298568667703 -0.016721605377723273 -0.5241124961932994 -0.026153003776033103 -0.4591005235541573 -0.03577562853766693 -0.3914732582128898 -0.04555481862708278 -0.3219176368710749 -0.05541191920991336 -0.2514605705939643 -0.06528781052447932 -0.18104268009612928 -0.07507176551896269 -0.11196716884842867 -0.08469370634111079 -0.045162006532842455 -0.09403502356799048 0.01821179615956009 -0.10303796293742751 0.07755922213376197 -0.11160234348863876 0.13225437750533192 -0.11969786378568258 0.1821603094740204 -0.12725541181748115 0.22728666466826797 -0.13427217451795043 0.26783156399962293 -0.14070908609668734 0.30425702197237353 -0.1465822808211363 0.3368814751552901 -0.1518758266142326 0.36633754027808085 -0.1566154118738989 0.39291218318598214 -0.16080118904701451 0.4171911257719779 -0.16446191663878856 0.439386492352583 -0.16760874934436956 0.45994014257091914 -0.17027032145900653 0.4790125412879642 -0.17246544172211387 0.4968965516448674 -0.1742214829225447 0.5137330418422221 -0.17556229349994176 0.529702119109355 -0.17651346942581347 0.5449382422663684 -0.17710120406078317 0.5595509990354525 -0.17734850432373442 0.5736631328380837 -0.17728095717839618 0.5873454899766751 -0.1769177686775203 0.6006960746658089 
-0.1762812150778771 0.613764970121127 -0.17538563197839754 0.6266142622289381 -0.17424827994898226 0.6392811475599468 -0.17287834325098408 0.6517867606960132 -0.17128780343818575 0.6641588143432021 -0.1694814741949339 0.6763783318612707 -0.1674671025212049 0.6884671836284598 -0.16524673951472887 0.7003707782968038 -0.16282564724467663 0.7121119022782677 -0.16020497914663234 0.7236060279746358 -0.15738920239780224 0.7348873058599402 -0.15438008037583925 0.7458455120547056 -0.15118229551688078 0.7565384199978373 -0.14779895130960063 0.7668301273250832 -0.1442349769408437 0.7768124197515427 -0.14049463411143517 0.7863171007165805 -0.13658219544935649 0.7954741619788589 -0.132502113846017 0.8040685198276143 -0.1282565017601814 0.8122628961565758 -0.1238486220307661 0.8197735447121919 -0.11927692652479756 0.8267805052628172 -0.11454233364484324 0.8329073393003051 -0.10963893261366696 0.8383313848633596 -0.10456509532441988 0.8425671908721037 -0.0993111775637087 0.8457737709650067 -0.09387389520865055 0.8473581677123205 -0.0882413727562096 0.8474537410178298 -0.08240967054528078 0.8453805105688886 -0.07636567125135234 0.8412395852848171 -0.0701045317043999 0.834294234754726 -0.06361143802990525 0.8245967623360527 -0.056879019820155384 0.8113860793403472 -0.04988970657862488 0.794642589481885 -0.04263242308028096 0.7736237303966355 -0.03508770473252842 0.7482350267191937 -0.027243027320387078 0.7178340652472136 -0.019082647679930783 0.6823092058987041 -0.010600480546470854 0.641268033428031 -0.0017955564365737776 0.5947218641755667 0.007319606754988065 0.5427299661662702 0.016718640533875367 0.4856052713895038 0.026359930602735223 0.4240127900089399 0.036182739926386646 0.35867490428164217 0.04611385091801457 0.2908482747267982 0.05606162947545539 0.22161312840767206 0.06593037007390823 0.1525896299052526 0.07561430450813424 0.0849607780893658 0.08501680035337579 0.02029315723590923 0.0940431345351202 -0.04052424263000665 0.10261865971181403 -0.09638244589331116 
0.11067811725303328 -0.14697018642225895 0.118180331713916 -0.19179606778610622 0.12509487775363876 -0.23114927335514124 0.13141359884161977 -0.2650834168138867 0.13713663507077195 -0.29430739240426507 0.14228077974770312 -0.3192369599384854 0.14686743606508704 -0.3407334212170972 0.15092772427907628 -0.3593789459486999 0.15449459594193876 -0.37594894675551993 0.1576047814089293 -0.3910421457783557 0.16029561516666943 -0.4052157215709242 0.16260420634795228 -0.4190001655067043 0.1645677856382568 -0.4327169960885374 0.1662209454230024 -0.4467935203000854 0.1675975479088609 -0.4613674546372524 0.1687273435015393 -0.4767505099603791 0.16963775840100317 -0.4929621104897127 0.17035135382512145 -0.5101927831906012 0.17088662588441403 -0.5283921397975019 0.17125739854221864 -0.5476316557483534 0.17147247575898292 -0.5678210742275924 0.17153720762487212 -0.5889283824258583 0.171452098271148 -0.610844431504618 0.17121592555225476 -0.6334598456320176 0.17082348939423786 -0.6566621554397092 0.1702693652855135 -0.6802901690751954 0.16954519227102252 -0.7042361807970307 0.1686433234736315 -0.728304181981795 0.16755428238189196 -0.7523925937924013 0.16626975521035586 -0.7762801047762006 0.16478091575972567 -0.7998709558269159 0.16308016404278422 -0.8229261592529834 0.16116097422959144 -0.8453581764823072 0.15901763086535803 -0.8669195682240016 0.15664719043153918 -0.8875360783181537 0.15404640814126982 -0.9069617478914543 0.1512163375941133 -0.925139486238786 0.14815580737927359 -0.9418323164903497 0.14486904119737432 -0.9569981391691366 0.14135541179733105 -0.9704111048889561 0.13762020159413696 -0.9820314795177431 0.133660813877529 -0.9916390155676481 0.12948051899396187 -0.9991701127691018 0.1250717129719964 -1.004395805878196 0.12043240949563068 -1.0071896562861613 0.11554739737596056 -1.0072943308857691 0.11040741816656507 -1.0044780743215649 0.10498906460572123 -0.9984419146898587 0.09927671007332539 -0.9888202470668659 0.09324153720506613 -0.9752806572449833 
0.0868657713216865 -0.9573230525328426 0.08012057202298183 -0.9346129024732525 0.07299128330186265 -0.9065412965285671 0.06545447385777294 -0.872812045837998 0.05750318307122548 -0.8327642042321542 0.04912562647571123 -0.7861973040072392 0.04032976324387047 -0.732531959294125 0.031126820792238292 -0.6717772952539282 0.021552105492413647 -0.6036813743554348 0.011653889912079307 -0.5286170258724867 0.0015050553465150596 -0.4469284428350978 -0.008803737071133409 -0.3594209623311249 -0.019164137844501244 -0.2671562658193436 -0.029453249161672163 -0.17127690435480925 -0.03954444177576723 -0.07347200901695332 -0.04930801277657576 0.025010355844260618 -0.058628913917761864 0.12216804708283775 -0.06740305655099145 0.21697240168683724 -0.07555526670483734 0.30754735094520724 -0.0830287358344227 0.3933235331851863 -0.08979985489891419 0.47288775910030906 -0.09586227665410048 0.5461733343350221 -0.1012384511417096 0.6123761532811126 -0.10596141151237383 0.6718359888478612 -0.11008343587712699 0.7243283828015038 -0.11365900700010084 0.7704409130546045 -0.11675154688505435 0.8103769184609757 -0.11942024658509653 0.8448149426330088 -0.12172543405720408 0.8741951003433431 -0.12372010278409672 0.8991719264083965 -0.12545386869919717 0.9202584619832526 -0.12696858336149236 0.9380189516278908 -0.12830079716175358 0.9529299006547148 -0.1294803121830993 0.9654373678511428 -0.13053132515287053 0.9759255774230075 -0.1314728150436178 0.9847210547004013 -0.1323187117077742 0.9921012118237901 -0.13307926911852763 0.9982889763210514 -0.13376060735592085 1.00346623172182 -0.13436650840132017 1.0077773566738248 -0.13489758648564917 1.0113312633440874 -0.13535318235531 1.0142202456189768 -0.13573032268068938 1.0165028397403146 -0.13602555568265287 1.0182415597838452 -0.13623381501049478 1.0194612676252792 -0.13635013084615788 1.0202112254293032 -0.13636851910741346 1.0204931355125237 -0.13628352220915663 1.0203558804206585 -0.13608927235470877 1.019785661322408 -0.13578077974433056 
1.0188436114082697 -0.13535333594892024 1.0175088902739995 -0.1348034032599269 1.01586901213535 -0.13412850479571245 1.0139066990332377 -0.13332751659017994 1.011749903626149 -0.13240112150042047 1.009395873466384 -0.13135131343843148 1.0070224592848098 -0.1301823861664178 1.004648895626031 -0.12889957303978372 1.0025025053324388 -0.12751044385516586 1.0006249527940914 -0.12602282211693597 0.9992798731672907 -0.12444640666141922 0.9985241209505237 -0.1227903362491894 0.9986335778011342 -0.12106482707725541 0.9996681104299342 -0.11927885723103077 1.00188666868928 -0.11744160106869986 1.005339151204078 -0.11556063447381686 1.0102400152964222 -0.11364297098522216 1.016618408686874 -0.11169400755696757 1.0246233484166773 -0.10971804436250304 1.0342556578037978 -0.1077179974377641 1.045587013088756 -0.10569539513243736 1.058585362844138 -0.10365074045087243 1.0732424924482324 -0.10158307745172414 1.089491676886554 -0.09949081138562475 1.1072504351405639 -0.09737099187317519 1.126418495688608 -0.09522038795141362 1.1468507429723778 -0.09303463210129374 1.1684178851785971 -0.09080938432379561 1.190927094011699 -0.08853943665687455 1.2142282543435756 -0.08621986418219355 1.2380963074504816 -0.0838451297280829 1.2623717851221712 -0.08141018717096458 1.286811267735523 -0.07890957521289538 1.311259437047065 -0.07633849048188582 1.3354655433458782 -0.07369183213973302 1.3592920727047408 -0.0709652896454993 1.3824889440630352 -0.06815429812549269 1.404948224001647 -0.06525518906352577 1.4264261343337319 -0.06226403156965298 1.4468526168146465 -0.059177880875772734 1.4659946956086785 -0.05599350180272929 1.4838242303448546 -0.0527087282300559 1.500123855633632 -0.04932108520913619 1.5149072822191358 -0.045829248791231736 1.527979133439155 -0.04223160169565432 1.5393916028337211 -0.03852775981812916 1.5489798786558624 -0.03471711542504194 1.5568289694877147 -0.030800373895191795 1.5628149061166765 -0.026778157275353744 1.567048139892543 -0.022652469072460026 1.569455437733012 
-0.018425442610311475 1.5701631682740094 -0.01410061904459259 1.5691544766327274 -0.009681923641888362 1.5665582080150355 -0.005174607833595415 1.5624104312246976 -0.0005845371149155155 1.5568231312736622 0.004081360830194158 1.5498697785406994 0.008815373280224102 1.5416213482606351 0.01360915816251173 1.5321646103983713 0.01845338183924741 1.5215084226089315 0.02333851378089042 1.5097321756864608 0.028253595356417707 1.4967775286726341 0.03318770696326771 1.482712781517159 0.03812777819723794 1.4674306871078424 0.04306084698510776 1.4510029301815637 0.04797105490813325 1.4333064036721332 0.05284282054787382 1.4144337927719197 0.05765729040631008 1.394275892725783 0.062396339588492174 1.3729515448865748 0.06703875617530422 1.3503829149238482 0.0715647229134606 1.3267074204497589 0.07595207889546916 1.3018885860152005 0.08018082073510707 1.2760719579814896 0.08422988313279918 1.24927013124597 0.08808115168144365 1.2216290717750564 0.09171712498126333 1.1932122879846165 0.09512388874984529 1.1641597200822134 0.09828978382821513 1.1345747533716009 0.10120692089905343 1.1045839389163097 0.10387076575305491 1.0743049301738572 0.10628016163003658 1.0438434894228241 0.10843758128677879 1.0132976568969678 0.11034809001251629 0.9827480846219169 0.11201983654144877 0.9522421746867393 0.11346272585301204 0.9218372987413541 0.11468856674226395 0.8915121268695542 0.11571035189244888 0.8613096130292351 0.11654143789708364 0.8311384097438445 0.11719624994228722 0.8010406098600176 0.11768796102750491 0.770866860578419 0.1180311884203289 0.7406720723856538 0.11823782221924535 0.710266491305123 0.11832194972914371 0.6797271599000456 0.1182937086748413 0.648839855995228 0.11816636914627941 0.6177061394281178 0.1179483078201844 0.5860976329761105 0.11765195734547444 0.5541356530705818 0.11728417750264418 0.5215820025448654 0.11685665421625206 0.4885674106245438 0.11637506322512164 0.4548441817118035 0.11585044136672534 0.42053903893558936 0.11528760026321361 0.38539383080709005 
0.1146969754887285 0.34951751956057275 0.11408276516948551 0.31264162209872376 0.11345477185655123 0.2748457200864682 0.11281672771320268 0.23585364531896036 0.11217767231363375 0.1957080306998799 0.11154089231327205 0.15413068283788584 0.11091442459362362 0.11112502730615485 0.11030098978017228 0.06641936035306309 0.10970727509584575 0.01998158233928584 0.10913518019499158 -0.02844252501100849 0.10858961838455394 -0.0789108467937913 0.10807131987827256 -0.13164680810869955 0.10758299140083706 -0.1867185450381734 0.10712383487010102 -0.24430276975941861 0.10669401578100456 -0.3044571563475782 0.10629095480218403 -0.36729255501734565 0.10591215955027329 -0.4328316351218246 0.10555323917564342 -0.5010971103830113 0.10520923357460961 -0.5720515370613506 0.1048742034488946 -0.6456071380870831 0.10454123322613965 -0.7216476016443415 0.10420338388279249 -0.799960417944077 0.10385255216515443 -0.8803486060902048 0.10348155277152277 -0.9624805352040958 0.10308199899599224 -1.046102591465302 0.10264729353398347 -1.1307962358958192 0.10216966968576083 -1.216298249538859 0.10164389553660572 -1.3021536831582157 0.10106359603185665 -1.3881410907624268 0.10042550886303733 -1.4738162531491839 0.09972520271575426 -1.5590323531879724 0.09896176774275783 -1.6433811153856217 0.09813303559968362 -1.7267953819453006 0.09724062840791829 -1.808904273967015 0.0962847468436548 -1.889705812003298 0.09526952258400695 -1.9688553604100596 0.09419738573767567 -2.0463961595217497 0.09307467534425755 -2.1219972494014083 0.09190557725272554 -2.1957297656294994 0.09069795973683892 -2.26726702684827 0.08945692176362074 -2.33669427659979 0.08819082582670809 -2.40368326402722 0.08690460385242187 -2.4683218293452596 0.08560592844316832 -2.530278708447705 0.08429854858075693 -2.5896357682920064 0.08298845355390602 -2.646064588790617 0.0816776035979036 -2.699638113135378 0.0803698085988218 -2.7500465166411794 0.07906524431558365 -2.7973583061589613 0.0777656464794368 -2.8413066109693195 
0.07646995785567721 -2.8819658135431503 0.07517840481306397 -2.9191387144957552 0.07388952076570944 -2.952916939528243 0.07260276565047949 -2.9831934469296657 0.07131703419494048 -3.0100842147812483 0.0700316714984602 -3.033579837179789 0.06874642149793708 -3.0538203070779315 0.06746091561483775 -3.0708862658823426 0.06617587890109893 -3.0849340937035734 0.06489135475066077 -3.0961148757320047 0.06360889175009427 -3.1045894946120915 0.06232887364895087 -3.110554176556957 0.0610533697884439 -3.1141624334010367 0.05978295908951196 -3.1156313575827808 0.058519933868126935 -3.1150988340976697 0.05726496052192346 -3.1127843312756185 0.056020364515584314 -3.108807174807579 0.05478689087169336 -3.103378295255011 0.05356685116993747 -3.0966004805876164 0.052361162083470275 -3.0886719873072646 0.05117219143690337 -3.0796842450825404 0.0500011803184788 -3.069823178842427 0.04885067692177529 -3.0591749083686 0.047722394667056495 -3.04791518025239 0.04661916546558988 -3.0361297210864353 0.04554325398178312 -3.023985824176148 0.04449779440344017 -3.011571347495047 0.043485567260037 -2.999045023865287 0.04250990583952996 -2.9864965962665413 0.04157394242971174 -2.97407382211979 0.04068098339018267 -2.9618657256417347 0.03983424007548063 -2.9500046937401923 0.03903667704678025 -2.9385750291169663 0.03829125204407631 -2.9276882292022863 0.037600242111558525 -2.9174196366327845 0.03696602039541775 -2.9078542526133653 0.0363898628555389 -2.8990550063438043 0.03587328400749033 -2.89107559978743 0.03541632587527697 -2.8839645116668047 0.03501946085787572 -2.877740698472869 0.03468136801415045 -2.8724377410593567 0.03440139273076098 -2.868037963030309 0.034176825013594855 -2.8645610107319532 0.034005886471890914 -2.8619522197779856 0.03388454002362548 -2.860219327957052 0.03380995702369776 -2.859271752234038 0.033776904656814676 -2.8591080390789636 0.03378163025636565 -2.8596040736358375 0.033817888420909256 -2.8607522825127365 0.03388116572688844 -2.8623986183911416 0.0339644222565423 
-2.864532503390469 0.034062568651713095 -2.8669746267217606 0.03416800578210895 -2.869714264542722 0.034275258681725755 -2.8725523687260712 0.034376403869554636 -2.875480392481751 0.03446576225065038 -2.8782856560016388 0.03453530643596026 -2.8809633690727776 0.034579317283149 -2.8832936030002845 0.0345898632024517 -2.885276084644168 0.0345613315992102 -2.88669002290566 0.034486067962070384 -2.8875396980561656 0.03435870246733252 -2.887609638606028 0.03417202823169708 -2.8869082406801487 0.03392105357509642 -2.885231186404673 0.033599186583847876 -2.882590411631577 0.033201954714099646 -2.8787981259714064 0.032723547988797286 -2.8738694127179345 0.03216016008445882 -2.8676378007230237 0.031506926698858656 -2.860121537338786 0.030760856301430498 -2.8511795266344366 0.029918184868819725 -2.840833694455953 0.02897687993086776 -2.8289714676936812 0.027934418761735542 -2.8156194326761663 0.026789865572439137 -2.8006956335334094 0.02554206739406328 -2.784232678459704 0.024191320272891358 -2.76618021920821 0.0227379648438729 -2.746578557413038 0.02118367183521644 -2.7254089380879676 0.019530406629814718 -2.7027213352624018 0.017781375473342333 -2.678527817546934 0.015940314217275202 -2.6528903452264383 0.01401214248723965 -2.6258505827974417 0.012002516590326513 -2.597485033098476 0.009918238993098294 -2.567863412900346 0.007767001894609416 -2.5370791395111163 0.005557595797786177 -2.5052279065794134 0.0032997639283623625 -2.4724213518052824 0.0010042501750187815 -2.4387779340553353 -0.0013173092924825892 -2.4044265629460275 -0.0036524609115630604 -2.3695033548314033 -0.005988058938645173 -2.3341502073243365 -0.008310432060596428 -2.2985133297729607 -0.010605551162675529 -2.2627394425624114 -0.012859274591025188 -2.226974666889942 -0.015057553093339258 -2.1913586852934612 -0.017186766710344335 -2.156024974456141 -0.019233930119074524 -2.1210915890442634 -0.021187148082931977 -2.08666512682715 -0.023035758263428283 -2.052826523762555 -0.02477090949288659 -2.019640890790767 
Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 2.36501140494985 -0.08166436036234545 -1.987138223085831 -0.0787321072914443 -1.9551792969186108 -0.07599394780551347 -1.923872660911774 -0.07344911249147731 -1.893122214881887 -0.07109018963744064 -1.8628757070526836 -0.06891211629259748 -1.8330521305756393 -0.0668928330833535 -1.80346392086791 -0.06502140542755991 -1.7740307894638458 -0.06326431025277701 -1.7444585537997397 -0.061604338498113885 -1.7146568358389247 -0.06000161628930765 -1.684270525132482 -0.05843391121228774 -1.6531968656020846 -0.05686035733883503 -1.6210705666844352 -0.05525618596817877 -1.5877836887274754 -0.053583611072216916 -1.5530023913987367 -0.05181802616212104 -1.516624969441158 -0.04992686053498904 -1.4783740374400947 -0.04788764203431397 -1.4381629046138653 -0.04567338909952016 -1.3957785462729297 -0.04326455043440861 -1.35115074869042 -0.04063896887057775 -1.3041250053381268 -0.0377796726214917 -1.2546404287033996 -0.034668156556590404 -1.2025858755053915 -0.03128903781475615 -1.1478962221079847 -0.027626543734266033 -1.0904838796013552 -0.02366606141293296 -1.030265332553422 -0.019394424705885607 -0.9671575819984114 -0.014798217817489943 -0.9010531679246364 -0.00986791244282185 -0.8318626339390031 -0.004594000334566826 -0.759468200252985 0.0010272601736873443 -0.6837784624416451 0.006996172191691967 -0.6047050146758137 0.013308278657665507 -0.5221784403932835 0.019948125249577366 -0.43620162414144764 0.026902233043931835 -0.34676404956188345 0.034134609358992644 -0.25402617373659675 0.04162543965990861 -0.15806743528157352 0.04931791602852394 -0.05925246109419709 0.05719194251708348 0.04224582804315026 0.06517377594149974 0.14584643139232617 0.07324948875874737 0.2513171009399907 0.08133346338320191 0.35788240897626183 0.08942204460492725 0.46531858478956084 0.09742247911143256 0.572706734354375 0.10534392007106748 0.6799212078063028 0.11309232856898424 0.7859738503928302 0.12069286959383829 0.8909282707551285 0.12805752989304062 0.9938069229606 
0.13523048281196703 1.0949252453947567 0.1421350115149702 1.193370712508351 0.14883364108053468 1.2897200795059927 0.15526039980378137 1.3831314291306616 0.16149058800853022 1.4743990548341288 0.16746283016416427 1.562711333759588 0.17325717308769498 1.6490055822599838 0.17880874761584764 1.7324381437111245 0.18419562426490121 1.8140125230867712 0.18934354237428977 1.8927971167730224 0.1943257003155364 1.9698051642105856 0.19905712167693854 2.043989409110261 0.20360713566122887 2.1163487616724455 0.2078830567098242 2.1857286776703733 0.21195320597037345 2.253119008087854 0.2157224887761509 2.317294805729626 0.21926054951261875 2.3792562997512903 0.22247539918156545 2.437758643385902 0.22543860277021016 2.4938291151796923 0.22806582353246932 2.5462536729513117 0.23042945488934893 2.5960896249698067 0.23245592118662772 2.6421958125734433 0.23421626339802765 2.6856459454093073 0.23564923201044313 2.725401149825996 0.23682192910428018 2.7625249246529764 0.23768560903447117 2.796097602497845 0.2383010251863283 2.827139663635626 0.23863107576555326 2.8548579320466247 0.238728318720346 2.880197273811896 0.2385658028809651 2.902492091113742 0.23818677215656103 2.922583421450704 0.23757264185534216 2.939930836416669 0.2367569480467524 2.9552496451027404 0.23572763125111146 2.9681202518618117 0.2345087588288122 2.979117166035644 0.2330930277032766 2.9879358924476085 0.2314957883609693 2.9950017633748804 0.2297128507928873 3.0001180267622547 0.22775199726626852 3.0035588503609105 0.22561064971903366 3.005225618564426 0.22329047469507338 3.0052455192168286 0.2207891769919581 3.0036056531104443 0.218104012116729 3.000296596197855 0.21523190749098295 2.9953759371553863 0.21216767986654217 2.9887145412953835 0.2089069017168039 2.9804235169394584 0.2054443011770799 2.970278270636442 0.20177435552883274 2.9584272376617506 0.19789463211781277 2.9445833540624333 0.19380004666325434 2.928920996116418 0.18949467976583445 2.911134052125345 0.18497697447506314 2.891421117895554 
0.18026175542096873 2.8695115538368055 0.17535530504827765 2.8456396635585266 0.1702869813884022 2.819632364701398 0.1650750617014932 2.7917827580706795 0.15976489916866216 2.762073150610412 0.15438859011919057 2.730879717527982 0.1490048119445332 2.6983729845658186 0.1436573673936534 2.6650197823910617 0.1384114012857834 2.6311654318055493 0.13331670449583788 2.597342234424131 0.12843598151509178 2.5640043046920047 0.12381759493044107 2.5316889309161814 0.11951374315406782 2.5008590955559495 0.11556454483122224 2.471977292047213 0.11200697582949837 2.445419609766912 0.10886885963285664 2.421502599889751 0.10617198445600813 2.400460253256978 0.10393151994118614 2.3824235508349583 0.10215798579786484 2.367481624919283 0.10085594236126863 2.355576110472847 0.10002955029985441 2.346684266939777 0.0996745363707602 2.340578964374648 0.09979136307593073 2.337164466978407 0.10036591862245105 2.3360762482866155 0.10139337308888051 2.337166727016829 0.10284525238514063 2.3399742227463864 0.10470652753396292 2.344298757512591 0.10692934786197564 2.3496456184543497 0.10948539688738568 2.3557575910119306 0.11230834776401105 2.3622112091678087 0.11536111662117528 2.368700919347634 0.11856872010180368 2.374999183881564 0.12189582300553228 2.380762771422271 0.12527109382023985 2.3860408695224655 0.12866851280647384 2.390434489753985 0.13202602669966873 2.394255175483707 0.13532606058233512 2.3969992095163972 0.13851302873812707 2.399141099367427 0.14157140299912077 2.4000281880253027 0.14444599840695538 2.400165932043837 0.14711776587858955 2.3987565454222275 0.149528814934982 2.396228900601763 0.15165648959411307 2.391715845342212 0.15344351130116646 2.3855272499881304 0.15486946297615986 2.3768599795197014 0.1558859920559034 2.365932032302543 0.1564823982699515 2.35213704434879 0.15662705151213252 2.335659586575807 0.15632267057381974 2.3161554594651923 0.15555698288361428 2.293817193950503 0.15434446821973782 2.2685395289153094 0.15268946928606 2.240528594107405 
0.15061372030028433 2.209839436449588 0.1481329425004041 2.1766751989992352 0.14527207121391744 2.1411670132494267 0.14205341070809288 2.103497998587221 0.1385031956161297 2.0638177101619246 0.1346474012966354 2.022284240858012 0.13051372772809136 1.9790426652459887 0.12613076870407766 1.934234347217559 0.1215288002087773 1.8880050170183613 0.11673884430511164 1.8404958271379868 0.11179428267906186 1.7918735341448593 0.10672782055476494 1.7422970026606726 0.10157459709270147 1.691975720713764 0.09636672075441315 1.6410996041440211 0.09113769277656576 1.58993161424687 0.08591559898605032 1.53869612709769 0.08072827321094632 1.48770011198151 0.07559682725003468 1.4371906370834426 0.07054073561483064 1.3874859099280104 0.06557300645505505 1.33882747282967 0.0607042988610929 1.291496374399353 0.05594030814461348 1.2456918436560962 0.051284407124787865 1.2016116275916988 0.046737304646367564 1.1593828149787744 0.042298198074684476 1.119094835084617 0.03796626305667307 1.0807924445903745 0.033740366147277774 1.0444594736438537 0.029621413140255248 1.0100664751680644 0.025610615617555378 0.977512605641775 0.02171170275138488 0.9467089353571289 0.017928124595372172 0.9174961950871574 0.014264722381274426 0.8897410991288242 0.0107248247042327 0.8632546958628358 0.007311532407557251 0.8378797673067536 0.0040257246646942715 0.8134312870637537 0.0008671109441111813 0.7897555346935128 -0.002166404173110156 0.766702479706226 -0.005078087212719008 0.7441442771067924 -0.007871807072952654 0.7219801512546445 -0.010552141065012913 0.7001139911148689 -0.013122974280688364 0.6784891423456777 -0.015588510594487693 0.6570342112868515 -0.017951357004282582 0.6357226685315808 -0.020214359390043787 0.614498423866415 -0.022378385130258537 0.5933518769226717 -0.02444470972657622 0.5722359829030068 -0.02641266598048775 0.5511484290658472 -0.028282216386786147 0.5300479504621753 -0.030051658161220733 0.5089322595265787 -0.03172002153341536 0.48776343041539383 -0.033285053801991 
0.4665326983956335 -0.03474514299163455 0.4452022618978615 -0.036097797445661314 0.423750809853405 -0.03734087236042992 0.40213686801132126 -0.03847170973779384 0.38032246181574353 -0.039487534696183706 0.35826003699920245 -0.04038535465249453 0.3358954172572054 -0.04116149014815324 0.31317630045885325 -0.04181227134992602 0.29003936468238256 -0.04233275671991312 0.26643349125682225 -0.04271820390506512 0.24229983196545887 -0.04296209410187162 0.21759740523248447 -0.04305828941749883 0.1922896805280268 -0.0429985344926887 0.16635417811409603 -0.04277516333723746 0.1397947179823048 -0.042378324664133514 0.11261057806904226 -0.04179909451110235 0.08485911825976283 -0.041026772044990806 0.05655530098310728 -0.04005228095691792 0.02781295487164972 -0.03886594487518335 -0.001355217696917474 -0.03746105684458709 -0.030790854208408863 -0.0358324528257955 -0.060512067162684924 -0.03398001331815535 -0.09034383512052747 -0.0319080363715738 -0.12036637037313254 -0.029628211595323046 -0.15042549833612953 -0.02715936193397055 -0.1806822257783562 -0.02452925944338791 -0.2110355353558589 -0.021774015350039214 -0.24171670890131905 -0.01893812711454703 -0.27268796404148105 -0.016072789348604365 -0.3042053886738647 -0.013233892195268538 -0.33627397301123685 -0.010479121653365411 -0.36910778821408496 -0.007864699718518043 -0.4027125327427404 -0.005442422243655123 -0.43720233327165897 -0.003256970563444272 -0.47254391656758543 -0.0013443279614369207 -0.5087300081649914 0.0002692000810859618 -0.5456706948219403 0.001567029626306502 -0.5832553397495863 0.002541862501053749 -0.6213435663598691 0.00319490368790333 -0.6597614770645397 0.0035347073692164645 -0.6983382606988046 0.003576087205623605 -0.7368772047789754 0.0033386867399240183 -0.7751977158353728 0.002845793165663432 -0.8131073355837525 0.002122920766066665 -0.8504311442809835 0.0011966817906317983 -0.8869965185342673 9.360602055194845e-05 -0.9226485693381623 -0.0011607887432977548 -0.9572475978825207 -0.0025428263551641028 
-0.9906755580304509 -0.0040313196435912725 -1.0228400643567834 -0.00560805265674052 -1.0536768578730262 -0.0072581372396347535 -1.0831539680599167 -0.008970222642725637 -1.1112730731545446 -0.010736603688212967 -1.138070126599777 -0.012553238129300193 -1.1636163823223817 -0.014419663754144997 -1.1880147618548964 -0.016338856545464284 -1.211399804593373 -0.018317011667221186 -1.2339311409236442 -0.02036321331951854 -1.2557908962611386 -0.022489150141126384 -1.2771767710979753 -0.024708523887670666 -1.2982956219690895 -0.027036815716102617 -1.3193587877106545 -0.02949033840057564 -1.3405718669388043 -0.03208622048840566 -1.3621334580457725 -0.03484099586276813 -1.3842237381617222 -0.037770988346695664 -1.4070038106152531 -0.04089034016836199 -1.4306098157127698 -0.04421193118149622 -1.4551434409599324 -0.04774479918457043 -1.4806840538653903 -0.05149560635731011 -1.5072530742075834 -0.055465523598854614 -1.5348630834737704 -0.059652078129150514 -1.5634327872983125 -0.06404577665778773 -1.5928964245439938 -0.06863198814677621 -1.6230462386765763 -0.0733879111679891 -1.6537236837958988 -0.07828407913961863 -1.6845773088688651 -0.08328277773604967 -1.7153452543609788 -0.08833881752529395 -1.745540186185215 -0.09340087699265817 -1.7748066506609754 -0.09841103523499317 -1.8025759489293214 -0.10331008178747096 -1.828441921080677 -0.10803478940482847 -1.8518570759376167 -0.11252692749760175 -1.8724298145848335 -0.11672713333564141 -1.889749617068817 -0.12058621091299486 -1.9034947265914584 -0.12405552089582851 -1.9134692853441453 -0.1270989745324333 -1.9194433477304385 -0.12968141018745763 -1.9214465931210438 -0.13178005889684458 -1.9193365912242182 -0.1333729981247131 -1.9133209192955687 -0.13444916968474427 -1.9033381390736483 -0.13499857652358235 -1.889696799139305 -0.1350202289558844 -1.8724241089635283 -0.1345150590974271 -1.851854444818648 -0.13349151578125465 -1.8281255058285388 -0.13196166193120598 -1.8015487873645357 -0.12994440830871953 -1.7723982979585868 
-0.127464875301665 -1.740956047576554 -0.12455532605228314 -1.7076503434514345 -0.12125731517024546 -1.6727711089594994 -0.1176201252911214 -1.6368997229788158 -0.11370432631940623 -1.6003938773217044 -0.1095770902157664 -1.5639520950264867 -0.10531521475243881 -1.5280341015762384 -0.1009971572853501 -1.4933704976114637 -0.0967048425406431 -1.460487812418304 -0.09251443019871244 -1.430032048969448 -0.08849859611083623 -1.4025070204123145 -0.0847192224146452 -1.3783872059265605 -0.08123140569580317 -1.3580743461208715 -0.07807913147444402 -1.341856674268312 -0.07529979007150138 -1.3299990641208466 -0.0729221097774346 -1.3226408421025144 -0.07096953857557063 -1.319907422598494 -0.06945996685934484 -1.3218393313542933 -0.06840722517757378 -1.328432715179406 -0.06782243306491151 -1.3396707192195099 -0.06771329912154633 -1.3554308028362214 -0.06808681309727552 -1.3756637203152144 -0.0689460256169512 -1.4001292171072723 -0.07029370032860749 -1.4287480231334908 -0.07212604743134556 -1.4611415512869077 -0.07443712315203088 -1.497169043352816 -0.07720906102500401 -1.5362611293578101 -0.08041800436724363 -1.578145503970304 -0.08402141736900903 -1.6219938745718092 -0.08796824735043067 -1.6673273795029324 -0.09218534134053355 -1.713043989952772 -0.0965953247776213 -1.7584598887364826 -0.10110337945109675 -1.8023251482737475 -0.10562276586344131 -1.8438843538137342 -0.11005936624402135 -1.8819943803425474 -0.114339284185479 -1.9160164238326374 -0.11838803620472298 -1.9451459210381303 -0.1221551682512088 -1.9689611167531718 -0.12559044815963857 -1.9870821500450044 -0.12866454363237978 -1.9992884601836656 -0.1313473285497177 -2.0055872847108382 -0.13362457996567703 -2.005902552936595 -0.1354812983427758 -2.000538607769795 -0.13691379112279145 -1.9895222335305016 -0.13791840113921283 -1.9733473622753 -0.13849876681979686 -1.9521226091542032 -0.1386592762636167 -1.926416406944613 -0.13840831482750887 -1.8964085820200511 -0.1377553466721629 -1.8626325693345576 -0.13671172138312082 
-1.8253346026237682 -0.13528994514010206 -1.7849287037928665 -0.13350387474318026 -1.7417282757844885 -0.1313684850979729 -1.6959849163169152 -0.1289011242735731 -1.6480836819658973 -0.12612004171206567 -1.5981175260833023 -0.12304794728949948 -1.5465460432010831 -0.11970776307097554 -1.493345725590957 -0.1161291555554568 -1.4390452645076293 -0.11234041157181196 -1.3835653375705195 -0.10837805120835477 -1.32748587989644 -0.10427444691723692 -1.270727096700019 -0.10007021778636371 -1.2138915171938154 -0.09579818901600025 -1.1569314791526326 -0.09149803740182355 -1.1004384153588684 -0.08719814264673481 -1.044401264021844 -0.08293175987986605 -0.9893667463252799 -0.07871876138944835 -0.93534274316536 -0.0745823604789827 -0.8828018526445712 -0.07053244174787425 -0.8317453502716904 -0.06658169123399929 -0.7825528296807693 -0.06273142263001626 -0.7352009998702241 -0.05898606969459947 -0.6899734576665635 -0.055341629201618635 -0.6468190160165529 -0.051797845661911916 -0.6059361892074298 -0.04834895780255909 -0.5672533676753269 -0.0449933856881142 -0.5309008297309787 -0.04172640220446403 -0.49679709043355263 -0.03854750857976113 -0.4650200563470832 -0.03545474314949003 -0.4354871419180554 -0.03245009303096289 -0.40823623952330296 -0.029535217970485497 -0.38318903291833284 -0.026715240669999765 -0.360351916060863 -0.02399566841912626 -0.33965168682988306 -0.021384849015423982 -0.3210674572012304 -0.018891858268783746 -0.3045265108406362 -0.016527878438635644 -0.289980687085972 -0.0143048164947105 -0.2773486249926504 -0.012235861272534874 -0.26655233937945033 -0.010334609529720554 -0.25749115220515933 -0.008615056544594787 -0.25005417856698375 -0.007091033877877486 -0.24411207870918267 -0.00577586326716881 -0.2395183600388857 -0.004681951769585305 -0.23610836882905192 -0.0038202849719388555 -0.23369776020532904 -0.003200074718126953 -0.23208303732442023 -0.002828193233241023 -0.23104118247604563 -0.002708814109649598 -0.2303305117676073 -0.0028428121423108273 
-0.229692899295787 -0.0032273776312465984 -0.2288561895764718 -0.003855361739615206 -0.22753947142886222 -0.004714926016275303 -0.2254592545959012 -0.005788891393083394 -0.2223361909797665 -0.007054745616852392 -0.21790946509266354 -0.008484349416800499 -0.21193653132849258 -0.010045025533760958 -0.20422633566219547 -0.011700102440193559 -0.19461060411464914 -0.01341156791638054 -0.183014488580798 -0.015140978768993152 -0.16936444760392688 -0.0168528138741198 -0.15371908012562954 -0.01851406316788575 -0.13608058858370126 -0.020096993779807323 -0.11660704025073523 -0.021576610457194653 -0.09532069314521066 -0.022933006493035153 -0.07241147068467137 -0.02414756896073007 -0.047866271429081196 -0.025205759353498045 -0.021848618487275342 -0.026093063789533782 0.005711841505449844 -0.026798378877249212 0.034701717725197305 -0.02731012897059878 0.06523398012526847 -0.027619890774320338 0.09723725600064603 -0.02771882917733492 0.13083275662012042 -0.02760115698927114 0.16596670759295587 -0.027260998530915008 0.20273354430349014 -0.02669542345075017 0.2410719913462401 -0.02590179362146516 0.2810267238121659 -0.02488022104743031 0.32251343017550127 -0.02363137355681082 0.36552054946775736 -0.022158294500989013 0.4099381905105239 -0.02046462321466386 0.45570620740521295 -0.01855581407582174 0.5026969860641547 -0.016437769303876793 0.5508172730887838 -0.01411757207137396 0.5999340783993234 -0.011602555072108046 0.6499376235311842 -0.008900683024580266 0.7007004163595979 -0.006020045765100096 0.7521091579106617 -0.002969012679046145 0.8040473478751303 0.00024393796426068205 0.8564056352647808 0.003610172056642881 0.9090772756382557 0.0071209028391836385 0.961959025666701 0.01076710282529198 1.0149466379723127 0.014539565450171429 1.0679413191278662 0.01842858249480734 1.1208300075688082 0.022424313941458777 1.1735148547471017 0.02651600770248145 1.2258612245431202 0.030692918204809648 1.2777688573533146 0.03494286233718385 1.3290702382801278 0.03925388279250087 
1.3796612360977711 0.04361200906632508 1.4293344696595092 0.048003767516286544 1.4779833399991835 0.052413133861710474 1.525359991652162 0.05682483866345415 1.5713591065919512 0.0612206389411691 1.6156986403177485 0.06558317906447238 1.6582786887697725 0.06989186358788856 1.6987952769403152 0.07412686392759822 1.7371562181609148 0.07826502436024912 1.7730521557116075 0.08228351349447319 1.8063977336433106 0.08615630448408094 1.836896737037939 0.08985700159933492 1.8644663417150735 0.09335644883855679 1.8888409467060658 0.09662442025313422 1.9099339069722845 0.09962874957140314 1.9275234303630153 0.1023358265611056 1.941514026190961 0.10471125857157779 1.9517335206208402 0.10671946161287714 1.9580782306676465 0.10832533518825159 1.960421437422684 0.10949370094914342 1.9586612212116283 0.11019097392131742 1.9527028214000863 0.11038573321778344 1.9424680501584577 0.1100491114654959 1.927879794469872 0.10915815008073382 1.9089203196729723 0.10769368058041628 1.8855329063770356 0.10564780121721017 1.857812335579981 0.10301809784622228 1.8257584194893075 0.09981888031936555 1.789631185973454 0.09607024115838227 1.7495405133585107 0.09180996627576567 1.7059132150653815 0.08707631276591216 1.6589675727323767 0.08191742750389815 1.6091804399100285 0.07637112391657233 1.5567645647758956 0.07047375254758283 1.5020507414672417 0.06424473566090748 1.4451121357723147 0.05769829682876369 1.3860277030407324 0.050835686034764634 1.324719781206046 0.04365836679394331 1.2610814639849859 0.03616485155761058 1.1949946988158755 0.028362837539541302 1.1263366116274571 0.020267052051625222 1.0550961244463022 0.011908146503353869 0.9813094490023058 0.003328914264430601 0.905171437687225 -0.005412818990648916 0.8269635715437971 -0.014248660029677444 0.7470816773959341 -0.023104537250106912 0.666009355643826 -0.031900380971892156 0.5842613715198385 -0.04056402034018904 0.5023940744513518 -0.04902033568033677 0.42094120675604463 -0.057214968565285275 0.3403745101512 -0.06508968584355365 
0.2611655571455514 -0.07261439329106942 0.18358682353934774 -0.07974936819832629 0.10803622115813215 -0.08648366494539951 0.03457554196773288 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 1.6732941549300973
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
from distutils.log import error
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    General continuous-time rate RNN used to model conditioning of a single
    neuron that drives a cursor/lickport (see learning() and
    initialize_cursor()).

    Parameters
    ---------------
    N = number of units (neurons) in the recurrent network
    g = gain constant of the network. g>1.0 is chaotic regime
    p = connection probability (sparsity of the recurrent matrix J)
    tau = neuron time constant
    dt = simulation time step
    N_input = number of input units. 1 for sound, 1 for lickport starts moving
    N_out = number of output units, 1 in our case which drives the lickport
    T = trial duration in seconds
    b = scale factor applied to the feedback error signal in learning()
    """
def __init__(self, N=500, g=1.5, p=0.1,
tau=0.1, dt=0.01, N_input=2,
N_out=1, T=1, b=0.01):
self.N = N
self.g = g
self.p = p
self.tau = tau
self.dt = dt
self.N_input = N_input
self.N_out = N_out
self.b = b
# Make the J matrix
mask = np.random.rand(self.N,self.N)<self.p
np.fill_diagonal(mask,np.zeros(self.N))
self.mask = mask
self.J = self.g / np.sqrt(self.p*self.N) * np.random.randn(self.N,self.N) * mask
self.W_in = 2*np.random.randn(self.N, self.N_input) - 1
self.W_out = 2*np.random.randn(self.N_out, self.N) - 1
self.W_fb = 2*np.random.randn(self.N, 1) - 1
def step(self, ext):
# print(f"{np.dot(self.J, self.z).shape}, {np.dot(self.W_in, ext.T).shape}")
self.r = self.r + \
self.dt/self.tau * \
(-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
self.z = np.tanh(self.r)
def add_input(self, I, plot=False):
self.ext = np.zeros((int(T/dt), self.N_input))
if I.shape[-1] == 1:
self.ext[:, 0] = I
else:
self.ext = I
if plot:
plt.plot(self.ext)
plt.show()
return self.ext
def simulate(self, T, ext, r0=None):
time = np.arange(0, T, self.dt)
time_steps = len(time)
if r0 is None:
r0 = 2*np.random.randn(self.N)-1.
if ext is None:
ext = np.zeros((time_steps, self.N_input))
self.ext = ext
self.r = r0
self.z = np.tanh(self.r)
#simulation for time_step steps
record_r = np.zeros((time_steps,self.N))
record_r[0,:] = self.r
for i in range(time_steps-1):
# print(ext[i].shape)
self.step(self.ext[i])
record_r[i+1, :] = self.r
return self.z, record_r
def initialize_cursor(self, cursor_distance_initial):
"""
cursor == lickport
everything in m/s
cursor_velocity must be dependent on CN activity but right now we just let it be constant
"""
self.cursor_velocity = 0.05
self.cursor_distance = cursor_distance_initial
self.cursor_distance_initial = cursor_distance_initial
def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None, manifold_eig_vec=None, manifold_eig_vals=None):
    """
    Run one closed-loop conditioning trial of duration T with online
    RLS/FORCE-style updates to the recurrent weights J.

    The error signal compares the conditioned neuron's activity against a
    reference: on day 0 the population mean, on later days the projection of
    the population onto the dominant eigenvector of the previous day's
    manifold. The cursor (lickport) advances whenever the conditioned
    neuron's rate crosses a threshold.

    NOTE(review): the original indentation of this block was destroyed by the
    notebook export; the loop/`if` nesting below (weight update grouped under
    the every-other-step `if`, cursor update inside the time loop) is a
    reconstruction — confirm against the original notebook.

    Parameters
    ----------
    T : trial duration; number of steps is int(T/dt).
    ext : external input, shape (time_steps, N_input), or None for silence.
    conditioned_neuron : index of the neuron being conditioned (the "CN").
    r0 : initial state, or None for a random draw.
    day_id : 0 selects the population-mean error; anything else uses the
        manifold projection (then manifold_eig_vec/vals must be given).
    manifold_eig_vec, manifold_eig_vals : eigenvectors/eigenvalues of the
        previous day's activity covariance (from calculate_manifold).

    Returns
    -------
    (record_r, tanh(record_r)) : (time_steps, N) state history and the
    corresponding firing rates.
    """
    self.conditioned_neuron = conditioned_neuron
    self.current_day_id = day_id
    # Cursor starts 1 unit away from the animal.
    self.initialize_cursor(1)
    time_steps = int(T/self.dt)
    # RLS inverse-correlation matrix, initialized to (1/alpha)*I with alpha = 20.
    self.P = np.eye(self.N, self.N)*0.05
    if r0 is None:
        r0 = 2*np.random.randn(self.N)-1.
    if ext is None:
        ext = np.zeros((time_steps, self.N_input))
    self.ext = ext
    self.r = r0 # remember to give previous trial r0 to the network
    self.z = np.tanh(self.r)
    record_r = np.zeros((time_steps, self.N))
    record_r[0,:] = self.r
    for i in range(time_steps-1):
        """
        abcdefghijklmnopqrstuvwxyz
        """
        # error = b*(CN_today(t) - average_activity(t)) on day 0;
        # error = b*(CN_today(t) - r(t) @ Manifold_yesterday) for day 1:x.
        if day_id==0:
            error_val = self.b*(record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
        else:
            error_val = self.b*(record_r[i, conditioned_neuron] - record_r[i, :]@manifold_eig_vec[:, manifold_eig_vals.argmax()])
        # NOTE(review): only the single largest-eigenvalue direction is used
        # here, i.e. a 1-D "learning vector" — this looks good except only 1
        # max eig_vec is taken (the first dimension). Ask kayvon whether more
        # dimensions are wanted, and whether the difference has to be large to
        # compensate for the small b value.
        # Broadcast the scalar error through the feedback weights -> (N, 1).
        self.error = self.W_fb*error_val
        if i%2 == 0:
            # RLS update of P (Sherman–Morrison rank-1 downdate), then a
            # delta-rule-style weight change; applied every other step.
            Pr = np.dot(self.P, self.r)
            self.P -= np.outer(Pr, self.r).dot(self.P)/(1+np.dot(self.r, Pr))
            self.e_minus = self.error
            self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
            self.J -= self.dw
        self.step(ext[i])
        record_r[i+1, :] = self.r
        # Cursor moves toward the animal whenever the CN's rate is above threshold.
        if self.z[self.conditioned_neuron] >= 0.3:
            self.cursor_distance -= self.cursor_velocity
    return record_r, np.tanh(record_r)
def participation_ratio(self, eig_vals):
    """Effective dimensionality: (sum of eigenvalues)^2 / sum of squared eigenvalues.

    Only the real parts of `eig_vals` are used.
    """
    real_parts = eig_vals.real
    return np.sum(real_parts) ** 2 / np.sum(real_parts ** 2)
def calculate_manifold(self, T, trials, I, pulse_end):
    """Estimate the activity manifold by PCA over post-pulse activity.

    Runs `trials` simulations driven by pulse I on input channel 0, stacks
    the post-pulse tanh activity, and eigendecomposes its covariance.
    Returns (projected activity, activity, eig_val, eig_vec, PR, cov).
    """
    time_steps = I.shape[0]
    ext = np.zeros((time_steps, self.N_input))
    ext[:, 0] = I
    npoints = time_steps - pulse_end
    activity = np.zeros((trials * npoints, self.N))
    for trial in range(trials):
        _, r_trial = self.simulate(T, ext=ext)
        rates = np.tanh(r_trial)
        activity[trial * npoints:(trial + 1) * npoints, :] = rates[pulse_end:, :]
    print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
    cov = np.cov(activity.T)
    # NOTE(review): eig on the symmetric covariance may return complex values
    # with ~0 imaginary part; consumers should take .real before mixing with floats.
    eig_val, eig_vec = np.linalg.eig(cov)
    pr = self.participation_ratio(eig_val)
    activity_manifold = activity @ eig_vec
    return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Return a length-int(T/dt) array equal to `amplitude` on [start, end) and 0 elsewhere.

    `start` and `end` are indices in time steps, not seconds.

    Raises
    ------
    ValueError
        If the pulse window does not fit inside the simulation.
    """
    time_steps = int(T / dt)
    # `assert` is stripped under -O; validate the window explicitly instead.
    if not 0 <= start <= end <= time_steps:
        raise ValueError(f"pulse window [{start}, {end}) must lie within [0, {time_steps}]")
    wave = np.zeros(time_steps)
    wave[start:end] = amplitude
    return wave
def initialize_network():
    """Build the RNN, run one naive trial, record its manifold, and pick the CN.

    Uses the module-level hyper-parameters (N, g, p, tau, dt, N_in, T, I,
    pulse_end) and appends the manifold snapshot to dict_manifold.
    """
    network = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    network.add_input(I)
    # simulate the network for T time and find the manifold, eig_vals etc
    z_end, r_simulation = network.simulate(T, ext=None)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # choose a conditioned neuron as one of the top 10 firing neurons.
    # Bug fix: argsort() is ascending, so [:10] picked the ten LOWEST-firing
    # neurons; the last ten indices are the top-10 as the comment intends.
    cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
    print(cn)
    return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered network dynamics; print the participation ratio.

    Returns the column ordering used and the CN's index within it.
    """
    rates = np.tanh(r_simulation)
    putils.plot_dynamics(rates, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        rates, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one day of feedback learning, then re-measure and store the manifold.

    Learning is seeded with the final state of the previous day and yesterday's
    manifold eigendecomposition (dict_manifold[-1]).
    """
    previous = dict_manifold[-1]
    r_learn, z_learn = network.learning(
        T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id,
        manifold_eig_vec=previous[3], manifold_eig_vals=previous[2])
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# --- network hyper-parameters ---
N = 500     # number of neurons
g = 1.5     # recurrent gain (>1.0: chaotic regime)
p = 0.1     # connection probability
tau = 0.1   # neuron time constant (s)
dt = 0.01   # simulation time step (s)
N_in = 2    # input channels (sound cue + lickport-motion cue)
T = 5       # trial duration (s)
n_days = 3  # number of simulated training days
# manifold snapshots appended after every simulate/learning stage
dict_manifold = []
print(dict_manifold)
# --- input pulse parameters, in time steps ---
pulse_amplitude = 1
pulse_start = 10
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
# print(r_simulation[-1])
r_learn = r_simulation  # seed day 1 with the naive network's final state
for i in range(n_days):
    # each "day": feedback learning, then re-measure the manifold and plot
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 183
Participation Ratio: 4.76003040479506 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.76003040479506 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.534980465029076 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 6.059551746958571
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
from matplotlib import gridspec
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
def plot_dynamics(recordings, cn=None):
    """Show the CN trace (top panel) and a heatmap of all neurons (bottom panel).

    recordings: array of shape (time_steps, n_neurons); cn indexes a column.
    """
    fig = plt.figure()
    grid = GridSpec(nrows=2, ncols=1, height_ratios=[1, 4])
    cn_axis = fig.add_subplot(grid[0, 0])
    cn_axis.plot(recordings[:, cn])
    cn_axis.set_ylim([-1, 1])
    heat_axis = fig.add_subplot(grid[1, 0])
    heat_axis.imshow(recordings.T, aspect="auto", cmap="Spectral", origin="lower")
    heat_axis.set_yticks([cn])
    heat_axis.set_yticklabels(labels=[f"CN_ID={cn}"])
    plt.show()
def plot_dynamics_ordered(recordings, criteria="mean", sort="ascending", cn=None):
    """Sort neurons (columns) by a summary statistic and plot the result.

    criteria: "mean", "max", or "max_initial" (max over the first 100 steps).
    sort: "ascending" or "descending".
    Returns (column permutation applied, CN's position within it).

    Raises
    ------
    ValueError
        For an unknown `criteria` or `sort`.
    """
    if criteria == "mean":
        arr_val = np.mean(recordings, axis=0)
    elif criteria == "max":
        arr_val = np.max(recordings, axis=0)
    elif criteria == "max_initial":
        arr_val = np.max(recordings[:100, :], axis=0)
    else:
        # Previously an unknown criteria fell through to a NameError on arr_val.
        raise ValueError(f"unknown criteria: {criteria!r}")
    arr1inds = arr_val.argsort()
    if sort == "descending":
        arr1inds = arr1inds[::-1]
    elif sort != "ascending":
        # Previously an unknown sort silently plotted the unsorted recordings.
        raise ValueError(f"unknown sort: {sort!r}")
    recordings = recordings[:, arr1inds]
    if cn is not None:
        cn = np.where(arr1inds == cn)[0][0]
    plot_dynamics(recordings, cn)
    return arr1inds, cn
# from distutils.log import error
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Rate-based RNN with sparse random recurrent connectivity and an
    RLS-style feedback learning rule driven by a conditioned neuron (CN).

    Parameters
    ---------------
    N = number of neurons
    g = gain constant of the network. g>1.0 is chaotic regime
    p = connection probability
    tau = neuron time constant
    dt = simulation time step
    N_input = number of input units. 1 for sound, 1 for lickport starts moving
    N_out = number of output units, 1 in our case which drives the lickport
    T = default trial duration in seconds
    b = scale factor of the feedback learning error
    """

    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.T = T  # stored so methods need not rely on the global T
        self.b = b
        # Sparse recurrent matrix J: Bernoulli(p) mask without self-connections,
        # Gaussian weights scaled by g/sqrt(pN) so the spectral radius is ~g.
        mask = np.random.rand(self.N, self.N) < self.p
        np.fill_diagonal(mask, False)
        self.mask = mask
        self.J = self.g / np.sqrt(self.p * self.N) * np.random.randn(self.N, self.N) * mask
        # NOTE(review): 2*randn-1 is Gaussian with mean -1, not uniform on
        # [-1, 1] -- kept as in the original initialization; confirm intent.
        self.W_in = 2 * np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2 * np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2 * np.random.randn(self.N, 1) - 1

    def step(self, ext):
        """One Euler step of tau * dr/dt = -r + J z + W_in ext; z = tanh(r)."""
        self.r = self.r + \
            self.dt / self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)

    def add_input(self, I, plot=False):
        """Store external input.

        A 1-D pulse is placed on input channel 0 (remaining channels zero);
        a 2-D (time_steps, N_input) array is used as-is.
        """
        I = np.asarray(I)
        if I.ndim == 1:
            # Bug fix: the old test `I.shape[-1] == 1` was False for a 1-D
            # pulse, silently dropping the channel dimension; the buffer size
            # also depended on the globals T and dt.
            self.ext = np.zeros((I.shape[0], self.N_input))
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext

    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds without plasticity.

        r0/ext default to a random state / zero input when None.
        Returns (final rates z, record_r of shape (time_steps, N)).
        """
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        # simulation for time_steps steps
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            self.step(self.ext[i])
            record_r[i + 1, :] = self.r
        return self.z, record_r

    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport; distances in m, velocity in m/s.
        The velocity should eventually depend on CN activity; constant for now.
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial

    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None,
                 manifold_eig_vec=None, manifold_eig_vals=None):
        """Train J with an RLS-style feedback rule for int(T/dt) steps.

        day_id == 0 measures the error against the mean network activity;
        otherwise against the projection of r(t) on yesterday's leading
        manifold direction. Returns (record_r, tanh(record_r)).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T / self.dt)
        self.P = np.eye(self.N, self.N) * 0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0  # remember to give previous trial r0 to the network
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            if day_id == 0:
                # error = b*(CN(t) - average network activity)
                error_val = self.b * (record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
            else:
                # error = b*(CN_today(t) - r(t) projected on yesterday's manifold).
                # Bug fix: np.linalg.eig may return complex eigenvectors with ~0
                # imaginary part even for the symmetric covariance; take the real
                # part so dw stays float64 and `self.J -= self.dw` cannot raise
                # UFuncTypeError (complex128 -> float64), as seen in practice.
                leading = manifold_eig_vec[:, manifold_eig_vals.argmax()].real
                error_val = self.b * (record_r[i, conditioned_neuron] - record_r[i, :] @ leading)
            self.error = self.W_fb * error_val  # error broadcast through feedback weights
            if i % 2 == 0:
                # Sherman-Morrison update of P every other step (RLS)
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P) / (1 + np.dot(self.r, Pr))
            self.e_minus = self.error
            self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
            self.J -= self.dw
            self.step(ext[i])
            record_r[i + 1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: the lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)

    def participation_ratio(self, eig_vals):
        """Effective dimensionality: (sum eig)^2 / sum(eig^2) of the real parts."""
        return (np.sum(eig_vals.real) ** 2) / (np.sum(eig_vals.real ** 2))

    def calculate_manifold(self, T, trials, I, pulse_end):
        """Estimate the activity manifold by PCA over post-pulse activity.

        Runs `trials` simulations driven by pulse I on channel 0, stacks the
        post-pulse tanh activity, and eigendecomposes its covariance.
        Returns (projected activity, activity, eig_val, eig_vec, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps - pulse_end
        activity = np.zeros((trials * npoints, self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            activity[i * npoints:(i + 1) * npoints, :] = z_simulation[pulse_end:, :]
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        # NOTE(review): eig on the symmetric covariance may return complex
        # values with ~0 imaginary part; consumers should take .real.
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Return a length-int(T/dt) array equal to `amplitude` on [start, end) and 0 elsewhere.

    `start` and `end` are indices in time steps, not seconds.

    Raises
    ------
    ValueError
        If the pulse window does not fit inside the simulation.
    """
    time_steps = int(T / dt)
    # `assert` is stripped under -O; validate the window explicitly instead.
    if not 0 <= start <= end <= time_steps:
        raise ValueError(f"pulse window [{start}, {end}) must lie within [0, {time_steps}]")
    wave = np.zeros(time_steps)
    wave[start:end] = amplitude
    return wave
def initialize_network():
    """Build the RNN, run one naive trial, record its manifold, and pick the CN.

    Uses the module-level hyper-parameters (N, g, p, tau, dt, N_in, T, I,
    pulse_end) and appends the manifold snapshot to dict_manifold.
    """
    network = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    network.add_input(I)
    # simulate the network for T time and find the manifold, eig_vals etc
    z_end, r_simulation = network.simulate(T, ext=None)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # choose a conditioned neuron as one of the top 10 firing neurons.
    # Bug fix: argsort() is ascending, so [:10] picked the ten LOWEST-firing
    # neurons; the last ten indices are the top-10 as the comment intends.
    cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
    print(cn)
    return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered network dynamics; print the participation ratio.

    Returns the column ordering used and the CN's index within it.
    """
    rates = np.tanh(r_simulation)
    putils.plot_dynamics(rates, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        rates, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one day of feedback learning, then re-measure and store the manifold.

    Learning is seeded with the final state of the previous day and yesterday's
    manifold eigendecomposition (dict_manifold[-1]).
    """
    previous = dict_manifold[-1]
    r_learn, z_learn = network.learning(
        T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id,
        manifold_eig_vec=previous[3], manifold_eig_vals=previous[2])
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# --- network hyper-parameters ---
N = 500     # number of neurons
g = 1.5     # recurrent gain (>1.0: chaotic regime)
p = 0.1     # connection probability
tau = 0.1   # neuron time constant (s)
dt = 0.01   # simulation time step (s)
N_in = 2    # input channels (sound cue + lickport-motion cue)
T = 5       # trial duration (s)
n_days = 3  # number of simulated training days
# manifold snapshots appended after every simulate/learning stage
dict_manifold = []
print(dict_manifold)
# --- input pulse parameters, in time steps ---
pulse_amplitude = 1
pulse_start = 10
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
# print(r_simulation[-1])
r_learn = r_simulation  # seed day 1 with the naive network's final state
for i in range(n_days):
    # each "day": feedback learning, then re-measure the manifold and plot
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 286
Participation Ratio: 5.833047343206401 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 5.833047343206401 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 1.8062129989037947 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 2.890549786411036
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'
# from distutils.log import error
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Rate-based RNN with sparse random recurrent connectivity and an
    RLS-style feedback learning rule driven by a conditioned neuron (CN).

    Parameters
    ---------------
    N = number of neurons
    g = gain constant of the network. g>1.0 is chaotic regime
    p = connection probability
    tau = neuron time constant
    dt = simulation time step
    N_input = number of input units. 1 for sound, 1 for lickport starts moving
    N_out = number of output units, 1 in our case which drives the lickport
    T = default trial duration in seconds
    b = scale factor of the feedback learning error
    """

    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.T = T  # stored so methods need not rely on the global T
        self.b = b
        # Sparse recurrent matrix J: Bernoulli(p) mask without self-connections,
        # Gaussian weights scaled by g/sqrt(pN) so the spectral radius is ~g.
        mask = np.random.rand(self.N, self.N) < self.p
        np.fill_diagonal(mask, False)
        self.mask = mask
        self.J = self.g / np.sqrt(self.p * self.N) * np.random.randn(self.N, self.N) * mask
        # NOTE(review): 2*randn-1 is Gaussian with mean -1, not uniform on
        # [-1, 1] -- kept as in the original initialization; confirm intent.
        self.W_in = 2 * np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2 * np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2 * np.random.randn(self.N, 1) - 1

    def step(self, ext):
        """One Euler step of tau * dr/dt = -r + J z + W_in ext; z = tanh(r)."""
        self.r = self.r + \
            self.dt / self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)

    def add_input(self, I, plot=False):
        """Store external input.

        A 1-D pulse is placed on input channel 0 (remaining channels zero);
        a 2-D (time_steps, N_input) array is used as-is.
        """
        I = np.asarray(I)
        if I.ndim == 1:
            # Bug fix: the old test `I.shape[-1] == 1` was False for a 1-D
            # pulse, silently dropping the channel dimension; the buffer size
            # also depended on the globals T and dt.
            self.ext = np.zeros((I.shape[0], self.N_input))
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext

    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds without plasticity.

        r0/ext default to a random state / zero input when None.
        Returns (final rates z, record_r of shape (time_steps, N)).
        """
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        # simulation for time_steps steps
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            self.step(self.ext[i])
            record_r[i + 1, :] = self.r
        return self.z, record_r

    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport; distances in m, velocity in m/s.
        The velocity should eventually depend on CN activity; constant for now.
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial

    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None,
                 manifold_eig_vec=None, manifold_eig_vals=None):
        """Train J with an RLS-style feedback rule for int(T/dt) steps.

        day_id == 0 measures the error against the mean network activity;
        otherwise against the projection of r(t) on yesterday's leading
        manifold direction. Returns (record_r, tanh(record_r)).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T / self.dt)
        self.P = np.eye(self.N, self.N) * 0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0  # remember to give previous trial r0 to the network
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            if day_id == 0:
                # error = b*(CN(t) - average network activity)
                error_val = self.b * (record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
            else:
                # error = b*(CN_today(t) - r(t) projected on yesterday's manifold).
                # Bug fix: np.linalg.eig may return complex eigenvectors with ~0
                # imaginary part even for the symmetric covariance; take the real
                # part so dw stays float64 and `self.J -= self.dw` cannot raise
                # UFuncTypeError (complex128 -> float64), as seen in practice.
                leading = manifold_eig_vec[:, manifold_eig_vals.argmax()].real
                error_val = self.b * (record_r[i, conditioned_neuron] - record_r[i, :] @ leading)
            self.error = self.W_fb * error_val  # error broadcast through feedback weights
            if i % 2 == 0:
                # Sherman-Morrison update of P every other step (RLS)
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P) / (1 + np.dot(self.r, Pr))
            self.e_minus = self.error
            self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
            self.J -= self.dw
            self.step(ext[i])
            record_r[i + 1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: the lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)

    def participation_ratio(self, eig_vals):
        """Effective dimensionality: (sum eig)^2 / sum(eig^2) of the real parts."""
        return (np.sum(eig_vals.real) ** 2) / (np.sum(eig_vals.real ** 2))

    def calculate_manifold(self, T, trials, I, pulse_end):
        """Estimate the activity manifold by PCA over post-pulse activity.

        Runs `trials` simulations driven by pulse I on channel 0, stacks the
        post-pulse tanh activity, and eigendecomposes its covariance.
        Returns (projected activity, activity, eig_val, eig_vec, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps - pulse_end
        activity = np.zeros((trials * npoints, self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            activity[i * npoints:(i + 1) * npoints, :] = z_simulation[pulse_end:, :]
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        # NOTE(review): eig on the symmetric covariance may return complex
        # values with ~0 imaginary part; consumers should take .real.
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Return a length-int(T/dt) array equal to `amplitude` on [start, end) and 0 elsewhere.

    `start` and `end` are indices in time steps, not seconds.

    Raises
    ------
    ValueError
        If the pulse window does not fit inside the simulation.
    """
    time_steps = int(T / dt)
    # `assert` is stripped under -O; validate the window explicitly instead.
    if not 0 <= start <= end <= time_steps:
        raise ValueError(f"pulse window [{start}, {end}) must lie within [0, {time_steps}]")
    wave = np.zeros(time_steps)
    wave[start:end] = amplitude
    return wave
def initialize_network():
    """Build the RNN, run one naive trial, record its manifold, and pick the CN.

    Uses the module-level hyper-parameters (N, g, p, tau, dt, N_in, T, I,
    pulse_end) and appends the manifold snapshot to dict_manifold.
    """
    network = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    network.add_input(I)
    # simulate the network for T time and find the manifold, eig_vals etc
    z_end, r_simulation = network.simulate(T, ext=None)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # choose a conditioned neuron as one of the top 10 firing neurons.
    # Bug fix: argsort() is ascending, so [:10] picked the ten LOWEST-firing
    # neurons; the last ten indices are the top-10 as the comment intends.
    cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
    print(cn)
    return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered network dynamics; print the participation ratio.

    Returns the column ordering used and the CN's index within it.
    """
    rates = np.tanh(r_simulation)
    putils.plot_dynamics(rates, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        rates, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one day of feedback learning, then re-measure and store the manifold.

    Learning is seeded with the final state of the previous day and yesterday's
    manifold eigendecomposition (dict_manifold[-1]).
    """
    previous = dict_manifold[-1]
    r_learn, z_learn = network.learning(
        T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id,
        manifold_eig_vec=previous[3], manifold_eig_vals=previous[2])
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# --- network hyper-parameters ---
N = 500     # number of neurons
g = 1.5     # recurrent gain (>1.0: chaotic regime)
p = 0.1     # connection probability
tau = 0.1   # neuron time constant (s)
dt = 0.01   # simulation time step (s)
N_in = 2    # input channels (sound cue + lickport-motion cue)
T = 5       # trial duration (s)
n_days = 3  # number of simulated training days
# manifold snapshots appended after every simulate/learning stage
dict_manifold = []
print(dict_manifold)
# --- input pulse parameters, in time steps ---
pulse_amplitude = 1
pulse_start = 10
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
# print(r_simulation[-1])
r_learn = r_simulation  # seed day 1 with the naive network's final state
for i in range(n_days):
    # each "day": feedback learning, then re-measure the manifold and plot
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 267
Participation Ratio: 8.686451874916052 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 8.686451874916052 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 3.013976793312265
--------------------------------------------------------------------------- UFuncTypeError Traceback (most recent call last) ~/learning-manifolds/Models/Clopath/rnn.py in <module> <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=244'>245</a> r_learn = r_simulation <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=245'>246</a> for i in range(n_days): --> <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=246'>247</a> r_learn = simulate_day(network, r_learn, cn, i+1, input=I) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=247'>248</a> plot_simulation(r_learn, cn, dict_manifold[i][4]) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=248'>249</a> ~/learning-manifolds/Models/Clopath/rnn.py in simulate_day(network, r_simulation, cn, day_id, input) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=214'>215</a> def simulate_day(network, r_simulation, cn, day_id, input=None): <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=215'>216</a> # train the network with our learning rule. 
calculate manifold, eig_vals etc --> <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=216'>217</a> r_learn, z_learn = network.learning(T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id, manifold_eig_vec=dict_manifold[-1][3], manifold_eig_vals=dict_manifold[-1][2]) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=217'>218</a> dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end)) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=218'>219</a> return r_learn ~/learning-manifolds/Models/Clopath/rnn.py in learning(self, T, ext, conditioned_neuron, r0, day_id, manifold_eig_vec, manifold_eig_vals) <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=144'>145</a> self.e_minus = self.error <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=145'>146</a> self.dw = np.outer(np.dot(self.P, self.r), self.e_minus) --> <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=146'>147</a> self.J -= self.dw <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=147'>148</a> <a href='file:///home/mohitmk/learning-manifolds/Models/Clopath/rnn.py?line=148'>149</a> self.step(ext[i]) UFuncTypeError: Cannot cast ufunc 'subtract' output from dtype('complex128') to dtype('float64') with casting rule 'same_kind'
# from distutils.log import error
import numpy as np
import matplotlib.pyplot as plt
import plot_utils as putils
class RNN(object):
    """
    Rate-based RNN with sparse random recurrent connectivity and an
    RLS-style feedback learning rule driven by a conditioned neuron (CN).

    Parameters
    ---------------
    N = number of neurons
    g = gain constant of the network. g>1.0 is chaotic regime
    p = connection probability
    tau = neuron time constant
    dt = simulation time step
    N_input = number of input units. 1 for sound, 1 for lickport starts moving
    N_out = number of output units, 1 in our case which drives the lickport
    T = default trial duration in seconds
    b = scale factor of the feedback learning error
    """

    def __init__(self, N=500, g=1.5, p=0.1,
                 tau=0.1, dt=0.01, N_input=2,
                 N_out=1, T=1, b=0.01):
        self.N = N
        self.g = g
        self.p = p
        self.tau = tau
        self.dt = dt
        self.N_input = N_input
        self.N_out = N_out
        self.T = T  # stored so methods need not rely on the global T
        self.b = b
        # Sparse recurrent matrix J: Bernoulli(p) mask without self-connections,
        # Gaussian weights scaled by g/sqrt(pN) so the spectral radius is ~g.
        mask = np.random.rand(self.N, self.N) < self.p
        np.fill_diagonal(mask, False)
        self.mask = mask
        self.J = self.g / np.sqrt(self.p * self.N) * np.random.randn(self.N, self.N) * mask
        # NOTE(review): 2*randn-1 is Gaussian with mean -1, not uniform on
        # [-1, 1] -- kept as in the original initialization; confirm intent.
        self.W_in = 2 * np.random.randn(self.N, self.N_input) - 1
        self.W_out = 2 * np.random.randn(self.N_out, self.N) - 1
        self.W_fb = 2 * np.random.randn(self.N, 1) - 1

    def step(self, ext):
        """One Euler step of tau * dr/dt = -r + J z + W_in ext; z = tanh(r)."""
        self.r = self.r + \
            self.dt / self.tau * \
            (-self.r + np.dot(self.J, self.z) + np.dot(self.W_in, ext.T))
        self.z = np.tanh(self.r)

    def add_input(self, I, plot=False):
        """Store external input.

        A 1-D pulse is placed on input channel 0 (remaining channels zero);
        a 2-D (time_steps, N_input) array is used as-is.
        """
        I = np.asarray(I)
        if I.ndim == 1:
            # Bug fix: the old test `I.shape[-1] == 1` was False for a 1-D
            # pulse, silently dropping the channel dimension; the buffer size
            # also depended on the globals T and dt.
            self.ext = np.zeros((I.shape[0], self.N_input))
            self.ext[:, 0] = I
        else:
            self.ext = I
        if plot:
            plt.plot(self.ext)
            plt.show()
        return self.ext

    def simulate(self, T, ext, r0=None):
        """Run the network for T seconds without plasticity.

        r0/ext default to a random state / zero input when None.
        Returns (final rates z, record_r of shape (time_steps, N)).
        """
        time = np.arange(0, T, self.dt)
        time_steps = len(time)
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0
        self.z = np.tanh(self.r)
        # simulation for time_steps steps
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            self.step(self.ext[i])
            record_r[i + 1, :] = self.r
        return self.z, record_r

    def initialize_cursor(self, cursor_distance_initial):
        """
        cursor == lickport; distances in m, velocity in m/s.
        The velocity should eventually depend on CN activity; constant for now.
        """
        self.cursor_velocity = 0.05
        self.cursor_distance = cursor_distance_initial
        self.cursor_distance_initial = cursor_distance_initial

    def learning(self, T, ext, conditioned_neuron, r0=None, day_id=None,
                 manifold_eig_vec=None, manifold_eig_vals=None):
        """Train J with an RLS-style feedback rule for int(T/dt) steps.

        day_id == 0 measures the error against the mean network activity;
        otherwise against the projection of r(t) on yesterday's leading
        manifold direction. Returns (record_r, tanh(record_r)).
        """
        self.conditioned_neuron = conditioned_neuron
        self.current_day_id = day_id
        self.initialize_cursor(1)
        time_steps = int(T / self.dt)
        self.P = np.eye(self.N, self.N) * 0.05  # RLS inverse-correlation estimate
        if r0 is None:
            r0 = 2 * np.random.randn(self.N) - 1.
        if ext is None:
            ext = np.zeros((time_steps, self.N_input))
        self.ext = ext
        self.r = r0  # remember to give previous trial r0 to the network
        self.z = np.tanh(self.r)
        record_r = np.zeros((time_steps, self.N))
        record_r[0, :] = self.r
        for i in range(time_steps - 1):
            if day_id == 0:
                # error = b*(CN(t) - average network activity)
                error_val = self.b * (record_r[i, conditioned_neuron] - np.mean(record_r[i, :]))
            else:
                # error = b*(CN_today(t) - r(t) projected on yesterday's manifold).
                # Bug fix: np.linalg.eig may return complex eigenvectors with ~0
                # imaginary part even for the symmetric covariance; take the real
                # part so dw stays float64 and `self.J -= self.dw` cannot raise
                # UFuncTypeError (complex128 -> float64), as seen in practice.
                leading = manifold_eig_vec[:, manifold_eig_vals.argmax()].real
                error_val = self.b * (record_r[i, conditioned_neuron] - record_r[i, :] @ leading)
            self.error = self.W_fb * error_val  # error broadcast through feedback weights
            if i % 2 == 0:
                # Sherman-Morrison update of P every other step (RLS)
                Pr = np.dot(self.P, self.r)
                self.P -= np.outer(Pr, self.r).dot(self.P) / (1 + np.dot(self.r, Pr))
            self.e_minus = self.error
            self.dw = np.outer(np.dot(self.P, self.r), self.e_minus)
            self.J -= self.dw
            self.step(ext[i])
            record_r[i + 1, :] = self.r
            if self.z[self.conditioned_neuron] >= 0.3:
                # CN above threshold: the lickport moves toward the animal
                self.cursor_distance -= self.cursor_velocity
        return record_r, np.tanh(record_r)

    def participation_ratio(self, eig_vals):
        """Effective dimensionality: (sum eig)^2 / sum(eig^2) of the real parts."""
        return (np.sum(eig_vals.real) ** 2) / (np.sum(eig_vals.real ** 2))

    def calculate_manifold(self, T, trials, I, pulse_end):
        """Estimate the activity manifold by PCA over post-pulse activity.

        Runs `trials` simulations driven by pulse I on channel 0, stacks the
        post-pulse tanh activity, and eigendecomposes its covariance.
        Returns (projected activity, activity, eig_val, eig_vec, PR, cov).
        """
        time_steps = I.shape[0]
        ext = np.zeros((time_steps, self.N_input))
        ext[:, 0] = I
        npoints = time_steps - pulse_end
        activity = np.zeros((trials * npoints, self.N))
        for i in range(trials):
            z_end, r_simulation = self.simulate(T, ext=ext)
            z_simulation = np.tanh(r_simulation)
            activity[i * npoints:(i + 1) * npoints, :] = z_simulation[pulse_end:, :]
        print(f"Calculating Manifold: time_steps={time_steps}, npoints={npoints}, trials={trials}, activity.shape={activity.shape}")
        cov = np.cov(activity.T)
        # NOTE(review): eig on the symmetric covariance may return complex
        # values with ~0 imaginary part; consumers should take .real.
        eig_val, eig_vec = np.linalg.eig(cov)
        pr = self.participation_ratio(eig_val)
        activity_manifold = activity @ eig_vec
        return activity_manifold, activity, eig_val, eig_vec, pr, cov
def square_wave(amplitude, start, end, T, dt):
    """Return a length-int(T/dt) array equal to `amplitude` on [start, end) and 0 elsewhere.

    `start` and `end` are indices in time steps, not seconds.

    Raises
    ------
    ValueError
        If the pulse window does not fit inside the simulation.
    """
    time_steps = int(T / dt)
    # `assert` is stripped under -O; validate the window explicitly instead.
    if not 0 <= start <= end <= time_steps:
        raise ValueError(f"pulse window [{start}, {end}) must lie within [0, {time_steps}]")
    wave = np.zeros(time_steps)
    wave[start:end] = amplitude
    return wave
def initialize_network():
    """Build the RNN, run one naive trial, record its manifold, and pick the CN.

    Uses the module-level hyper-parameters (N, g, p, tau, dt, N_in, T, I,
    pulse_end) and appends the manifold snapshot to dict_manifold.
    """
    network = RNN(N=N, g=g, p=p, tau=tau, dt=dt, N_input=N_in, T=T)
    network.add_input(I)
    # simulate the network for T time and find the manifold, eig_vals etc
    z_end, r_simulation = network.simulate(T, ext=None)
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    # choose a conditioned neuron as one of the top 10 firing neurons.
    # Bug fix: argsort() is ascending, so [:10] picked the ten LOWEST-firing
    # neurons; the last ten indices are the top-10 as the comment intends.
    cn = np.random.choice(np.max(r_simulation[:100, :], axis=0).argsort()[-10:])
    print(cn)
    return network, r_simulation, cn
def plot_simulation(r_simulation, cn, pr):
    """Plot raw and activity-ordered network dynamics; print the participation ratio.

    Returns the column ordering used and the CN's index within it.
    """
    rates = np.tanh(r_simulation)
    putils.plot_dynamics(rates, cn=cn)
    sorted_array, cn_new_idx = putils.plot_dynamics_ordered(
        rates, criteria="max_initial", sort="descending", cn=cn)
    print(f"Participation Ratio: {pr}")
    return sorted_array, cn_new_idx
def simulate_day(network, r_simulation, cn, day_id, input=None):
    """Run one day of feedback learning, then re-measure and store the manifold.

    Learning is seeded with the final state of the previous day and yesterday's
    manifold eigendecomposition (dict_manifold[-1]).
    """
    previous = dict_manifold[-1]
    r_learn, z_learn = network.learning(
        T, None, conditioned_neuron=cn, r0=r_simulation[-1], day_id=day_id,
        manifold_eig_vec=previous[3], manifold_eig_vals=previous[2])
    dict_manifold.append(network.calculate_manifold(T, 10, I, pulse_end=pulse_end))
    return r_learn
# --- network hyper-parameters ---
N = 500     # number of neurons
g = 1.5     # recurrent gain (>1.0: chaotic regime)
p = 0.1     # connection probability
tau = 0.1   # neuron time constant (s)
dt = 0.01   # simulation time step (s)
N_in = 2    # input channels (sound cue + lickport-motion cue)
T = 5       # trial duration (s)
n_days = 3  # number of simulated training days
# manifold snapshots appended after every simulate/learning stage
dict_manifold = []
print(dict_manifold)
# --- input pulse parameters, in time steps ---
pulse_amplitude = 1
pulse_start = 10
pulse_end = 30
pulse_length = pulse_end-pulse_start
# make the input pulse
I = square_wave(pulse_amplitude, pulse_start, pulse_end, T, dt)
network, r_simulation, cn = initialize_network()
plot_simulation(r_simulation, cn, dict_manifold[0][4])
# print(r_simulation[-1])
r_learn = r_simulation  # seed day 1 with the naive network's final state
for i in range(n_days):
    # each "day": feedback learning, then re-measure the manifold and plot
    r_learn = simulate_day(network, r_learn, cn, i+1, input=I)
    plot_simulation(r_learn, cn, dict_manifold[i][4])
"""
simulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold →
day 1 complete → repeat for day 2 with different conditioned neuron
"""
[] Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500) 264
Participation Ratio: 6.760041312641578 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 6.760041312641578 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 3.960622379151204 Calculating Manifold: time_steps=500, npoints=470, trials=10, activity.shape=(4700, 500)
Participation Ratio: 4.671972414230371
'\nsimulate → calculate manifold → feedback learning with cursor velocity → simulate → calculate manifold → \nday 1 complete → repeat for day 2 with different conditioned neuron\n'